Dataset schema (column: dtype, observed range; ⌀ = nulls present):
repo_name: string, length 7–111
__id__: int64, 16.6k–19,705B
blob_id: string, length 40
directory_id: string, length 40
path: string, length 5–151
content_id: string, length 40
detected_licenses: list
license_type: string, 2 classes
repo_url: string, length 26–130
snapshot_id: string, length 40
revision_id: string, length 40
branch_name: string, length 4–42
visit_date: timestamp[ns]
revision_date: timestamp[ns]
committer_date: timestamp[ns]
github_id: int64, 14.6k–687M ⌀
star_events_count: int64, 0–209k
fork_events_count: int64, 0–110k
gha_license_id: string, 12 classes
gha_fork: bool, 2 classes
gha_event_created_at: timestamp[ns]
gha_created_at: timestamp[ns]
gha_updated_at: timestamp[ns]
gha_pushed_at: timestamp[ns]
gha_size: int64, 0–10.2M ⌀
gha_stargazers_count: int32, 0–178k ⌀
gha_forks_count: int32, 0–88.9k ⌀
gha_open_issues_count: int32, 0–2.72k ⌀
gha_language: string, length 1–16 ⌀
gha_archived: bool, 1 class
gha_disabled: bool, 1 class
content: string, length 10–2.95M
src_encoding: string, 5 classes
language: string, 1 class
is_vendor: bool, 2 classes
is_generated: bool, 2 classes
length_bytes: int64, 10–2.95M
extension: string, 19 classes
num_repo_files: int64, 1–202k
filename: string, length 4–112
num_lang_files: int64, 1–202k
alphanum_fraction: float64, 0.26–0.89
alpha_fraction: float64, 0.2–0.89
hex_fraction: float64, 0–0.09
num_lines: int32, 1–93.6k
avg_line_length: float64, 4.57–103
max_line_length: int64, 7–931

Each row below lists these fields in order, pipe-separated, with the file content inlined in place of the `content` field.
nikaven/drake | 17,712,445,144,014 | 6ed6df63a6dfcb060da4c77640f42819e89bc176 | e6793e7eb54d2105c373a8af7ebc653b7ad94575 | /tools/workspace/drake_visualizer/repository.bzl | e43653f11b269aa4c1ef9e6a887078c04514266f | ["BSD-3-Clause"] | permissive | https://github.com/nikaven/drake | 5c59e88f79b530ddf62496452959abeaf8fff1e3 | 34bab4ecaa34ac09ade6dcb11cf7bc0d13c5bd4e | refs/heads/master | 2020-03-31T01:37:25.441270 | 2018-10-05T19:42:22 | 2018-10-05T19:42:22 | 151,788,663 | 7 | 0 | null | true | 2018-10-05T23:37:07 | 2018-10-05T23:37:07 | 2018-10-05T23:30:44 | 2018-10-05T23:27:34 | 338,938 | 0 | 0 | 0 | null | false | null |
# -*- mode: python -*-
# vi: set ft=python :

"""
Downloads and unpacks a precompiled version of drake-visualizer (a subset of
Director, https://git.io/vNKjq) and makes it available to be used as a
dependency of shell scripts.

Archive naming convention:
    dv-<version>-g<commit>-python-<python version>-qt-<qt version>
        -vtk-<vtk version>-<platform>-<arch>[-<rebuild>]

Build configuration:
    BUILD_SHARED_LIBS=OFF
    CMAKE_BUILD_TYPE=Release
    DD_QT_VERSION=5
    USE_EXTERNAL_INSTALL=ON
    USE_LCM=ON
    USE_LCMGL=ON
    USE_SYSTEM_EIGEN=ON
    USE_SYSTEM_LCM=ON
    USE_SYSTEM_LIBBOT=ON
    USE_SYSTEM_VTK=ON

Example:
    WORKSPACE:
        load(
            "@drake//tools/workspace/drake_visualizer:repository.bzl",
            "drake_visualizer_repository",
        )
        drake_visualizer_repository(name = "foo")

    BUILD:
        sh_binary(
            name = "foobar",
            srcs = ["bar.sh"],
            data = ["@foo//:drake_visualizer"],
        )

Argument:
    name: A unique name for this rule.
"""

load("@drake//tools/workspace:os.bzl", "determine_os")

# TODO(jamiesnape): Publish scripts used to create binaries. There will be a CI
# job for developers to build new binaries on demand.

def _impl(repository_ctx):
    os_result = determine_os(repository_ctx)

    if os_result.error != None:
        fail(os_result.error)

    if os_result.is_macos:
        archive = "dv-0.1.0-314-ga5a6f6f-python-2.7.15-qt-5.11.1-vtk-8.1.1-mac-x86_64.tar.gz"  # noqa
        sha256 = "02f321cf6068068f1aa9747b6b7834c41cd5ccf53aef90ad58229f2c1bfa963c"  # noqa
    elif os_result.ubuntu_release == "16.04":
        archive = "dv-0.1.0-314-ga5a6f6f-python-2.7.12-qt-5.5.1-vtk-8.1.1-xenial-x86_64.tar.gz"  # noqa
        sha256 = "4bd36e80295006ce4bab57fa57b95b69511623abba80094fb2fdf1eaa18607f9"  # noqa
    elif os_result.ubuntu_release == "18.04":
        archive = "dv-0.1.0-314-ga5a6f6f-python-2.7.15-qt-5.9.5-vtk-8.1.1-bionic-x86_64.tar.gz"  # noqa
        sha256 = "49d4fe29285ebbc420d19bf91511e36e8b1eb03d23bc7912d982ae12c4b2b36c"  # noqa
    else:
        fail("Operating system is NOT supported", attr = os_result)

    urls = [
        x.format(archive = archive)
        for x in repository_ctx.attr.mirrors.get("director")
    ]
    root_path = repository_ctx.path("")

    repository_ctx.download_and_extract(urls, root_path, sha256 = sha256)

    file_content = """# -*- python -*-

# DO NOT EDIT: generated by drake_visualizer_repository()

licenses([
    "notice",  # Apache-2.0 AND BSD-3-Clause AND Python-2.0
    "reciprocal",  # MPL-2.0
    "restricted",  # LGPL-2.1-only AND LGPL-2.1-or-later AND LGPL-3.0-or-later
    "unencumbered",  # Public-Domain
])

# drake-visualizer has the following non-system dependencies in addition to
# those declared in deps:
#   bot2-lcmgl: LGPL-3.0-or-later
#   ctkPythonConsole: Apache-2.0
#   Eigen: BSD-3-Clause AND MPL-2.0 AND Public-Domain
#   LCM: BSD-3-Clause AND LGPL-2.1-only AND LGPL-2.1-or-later
#   Python: Python-2.0
#   PythonQt: LGPL-2.1-only
#   QtPropertyBrowser: LGPL-2.1-only
# TODO(jamiesnape): Enumerate system dependencies.

py_library(
    name = "drake_visualizer_python_deps",
    deps = [
        "@lcm//:lcm-python",
        "@lcmtypes_bot2_core//:lcmtypes_bot2_core_py",
        # TODO(eric.cousineau): Expose VTK Python libraries here for Linux.
        "@lcmtypes_robotlocomotion//:lcmtypes_robotlocomotion_py",
    ],
    visibility = ["//visibility:public"],
)

filegroup(
    name = "drake_visualizer",
    srcs = glob([
        "lib/libPythonQt.*",
        "lib/libddApp.*",
        "lib/python2.7/site-packages/bot_lcmgl/**/*.py",
        "lib/python2.7/site-packages/director/**/*.py",
        "lib/python2.7/site-packages/director/**/*.so",
        "lib/python2.7/site-packages/urdf_parser_py/**/*.py",
    ]) + [
        "bin/drake-visualizer",
        "share/doc/director/LICENSE.txt",
    ],
    data = [
        ":drake_visualizer_python_deps",
        "@lcm//:libdrake_lcm.so",
        "@vtk",
    ],
    visibility = ["//visibility:public"],
)

load("@drake//tools/install:install.bzl", "install_files")

install_files(
    name = "install",
    dest = ".",
    files = [":drake_visualizer"],
    visibility = ["//visibility:public"],
)
"""

    repository_ctx.file(
        "BUILD.bazel",
        content = file_content,
        executable = False,
    )

drake_visualizer_repository = repository_rule(
    attrs = {
        "mirrors": attr.string_list_dict(),
    },
    implementation = _impl,
)
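For readers unfamiliar with the `mirrors` attribute used in `_impl` above: the rule expects it to map the key "director" to a list of URL templates containing an `{archive}` placeholder, which `x.format(archive = archive)` fills in. A minimal, purely illustrative WORKSPACE sketch; the mirror host below is made up, not taken from this file:

    drake_visualizer_repository(
        name = "drake_visualizer",
        mirrors = {
            "director": [
                "https://mirror.example.com/director/{archive}",  # hypothetical mirror URL
            ],
        },
    )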
| UTF-8 | Python | false | false | 4,525 | bzl | 279 | repository.bzl | 262 | 0.631602 | 0.575691 | 0 | 147 | 29.782313 | 103 |
keshav-c17/python | 755,914,284,463 | ea636cdefecef6b50ec8ba3ba777ac196f2f1784 | 130ac79a00e2ae323452a3229f1bd6961d6cf1c3 | /Hacker-rank/17_set_add().py | c7c1a33c1af3aec3fa805e36da65d21e048f9195 | [] | no_license | https://github.com/keshav-c17/python | 7c4bd1e11b359e131cc9ce2bed91e058b0a3ba54 | 067d4355912a27e512118aed6715113557c8b7e6 | refs/heads/master | 2023-05-01T15:15:41.605578 | 2021-05-27T00:16:11 | 2021-05-27T00:16:11 | 322,216,929 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
n = int(input("Enter no. of countries: "))
list_of_countries = set()
for i in range(n):
    list_of_countries.add(str(input("Enter names of the countries: ")))
print(len(list_of_countries))
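A quick illustration of the behaviour this snippet relies on (not part of the original file): set.add() silently ignores values that are already present, so duplicate country names do not inflate the final count.

countries = set()
for name in ["Kenya", "India", "Kenya"]:
    countries.add(name)
print(len(countries))  # prints 2 -- the repeated "Kenya" is stored only once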
| UTF-8 | Python | false | false | 191 | py | 52 | 17_set_add().py | 52 | 0.680628 | 0.680628 | 0 | 5 | 37 | 71 |
42Paris/remote-challenges | 12,463,995,113,879 | d8d99efea4576155ac18120260c0174ad8c692a6 | cec0e621218352b3685358c697ecbe9c725e6131 | /chall02/hben-yah.py | f3ece231332d21de9edc99e8be282dce4cd57881 | [] | no_license | https://github.com/42Paris/remote-challenges | ecf76c101f5f070455e2f54128faebef18a9b000 | f9270e7981178243fccebeee2574e3dde91891fd | refs/heads/master | 2021-05-21T19:36:36.177086 | 2020-06-12T12:22:35 | 2020-06-12T12:22:35 | 252,772,141 | 74 | 209 | null | false | 2020-06-12T12:22:37 | 2020-04-03T15:33:34 | 2020-06-05T12:58:47 | 2020-06-12T12:22:36 | 1,920 | 63 | 133 | 1 | C | false | false |
#!/usr/bin/python3

import sys
import re

morseDic = {
    'a':'.-',
    'b':'-...',
    'c':'-.-.',
    'd':'-..',
    'e':'.',
    'f':'..-.',
    'g':'--.',
    'h':'....',
    'i':'..',
    'j':'.---',
    'k':'-.-',
    'l':'.-..',
    'm':'--',
    'n':'-.',
    'o':'---',
    'p':'.--.',
    'q':'--.-',
    'r':'.-.',
    's':'...',
    't':'-',
    'u':'..-',
    'v':'...-',
    'w':'.--',
    'x':'-..-',
    'y':'-.--',
    'z':'--..'
}

def usage():
    print("usage: ./hben-yah.py <a-zA-Z string>")
    exit()

def str_replace(text, dic):
    rc = re.compile('|'.join(map(re.escape, dic)))

    def trans(match):
        return dic[match.group(0)]

    return rc.sub(trans, text)

if len(sys.argv) != 2:
    usage()
word = sys.argv[1]
if not re.match("^[A-Za-z0-9 ]*$", word) or len(word) == 0:
    usage()
morse = str_replace(word.lower(), morseDic)
print(morse)
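As a rough usage sketch (added here, not part of the original file): str_replace compiles an alternation of all dictionary keys and substitutes each match, so letters map to Morse while characters absent from morseDic (digits and spaces, which the input check allows) pass through unchanged.

print(str_replace("sos", morseDic))   # ...---...
print(str_replace("go 2", morseDic))  # --.--- 2   (space and digit are left as-is)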
| UTF-8 | Python | false | false | 764 | py | 713 | hben-yah.py | 55 | 0.413613 | 0.40445 | 0 | 55 | 12.909091 | 59 |
stellakaniaru/hacker-rank | 9,577,777,085,166 | cc57b81aeff66ac4f143219c31cbca3298507570 | c2843304f68aaa18f6a3d55221061fb19422b3a6 | /anagram.py | 6a0a0053be79509679ff2ac7f16108842e7166b3 | [] | no_license | https://github.com/stellakaniaru/hacker-rank | 974ced2514d1ae3ed5afb027b392e2545614e648 | 8dd886c00e4c2522a728b3456d64c8d4712ca2c0 | refs/heads/master | 2021-06-06T21:52:54.630431 | 2018-11-15T07:10:21 | 2018-11-15T07:10:21 | 59,646,209 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''Given a string which is a concatenation of two substrings,
determine if the two are anagrams of each other'''


def anagram(word):
    '''Should find out if a given word is an anagram or not'''
    #check if the word can be divided into equal substrings
    #return -1 if the word can't be divided into two equal strings
    if len(word) % 2 == 0:
        a,b = word[:len(word)/2], word[len(word)/2:]
        #sort the substrings to get them in list format
        a1 = sorted(a)
        b1 = sorted(b)
        c = []
        #loop through to check if elements in both substrings are equal
        #return the elements not in both lists in a new list and output the length of the list
        for i in a1:
            if i not in b:
                c.append(i)
        return len(c)
    return '-1'

# Tests = input('')
# for i in range(Tests):
#     print anagram(str(input()))
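A few hand-checked calls (added for clarity, not the author's) may help pin down the return value: the function does not return a boolean but the number of characters in the first half that are missing from the second half, or '-1' for odd-length input. Note that word[:len(word)/2] relies on Python 2 integer division; under Python 3 it would need //.

print(anagram('abccba'))  # 0  -- halves 'abc' / 'cba' are already anagrams
print(anagram('aaabbb'))  # 3  -- every 'a' in the first half is missing from 'bbb'
print(anagram('abc'))     # -1 -- odd length cannot be split into equal halves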
| UTF-8 | Python | false | false | 791 | py | 15 | anagram.py | 14 | 0.677623 | 0.666245 | 0 | 29 | 26.310345 | 88 |
BlueMoon3000/newsapp | 11,630,771,466,199 | e2b568a56b7e8a4714e624ef59b4fc2dcb7288a5 | c42bf7ba6dc853ae6a4802a48cfaccdb8419ea8b | /core/migrations/0001_initial.py | db7c7052d61da06c122dc533c0f9dd495b65b20a | [] | no_license | https://github.com/BlueMoon3000/newsapp | 6c2b41f4ef9ac85128a3edf680842c04604496dd | 767f2265cff5896b364a9afd6945181602351f46 | refs/heads/master | 2021-01-15T11:49:20.804564 | 2015-12-20T01:34:30 | 2015-12-20T01:34:30 | 15,790,768 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding model 'AppUser'
        db.create_table(u'core_appuser', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('password', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('last_login', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('email', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
            ('first_name', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('last_name', self.gf('django.db.models.fields.CharField')(max_length=60)),
            ('full_name', self.gf('django.db.models.fields.CharField')(max_length=140)),
            ('fb_user_id', self.gf('django.db.models.fields.CharField')(max_length=20)),
            ('fb_access_token', self.gf('django.db.models.fields.CharField')(max_length=255, null=True)),
            ('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('is_admin', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('last_updated_time', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('created_time', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal(u'core', ['AppUser'])

        # Adding model 'Topic'
        db.create_table(u'core_topic', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(unique=True, max_length=100)),
            ('last_updated_time', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('created_time', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal(u'core', ['Topic'])

        # Adding model 'SearchTopic'
        db.create_table(u'core_searchtopic', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(unique=True, max_length=100)),
            ('topic', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Topic'])),
            ('is_master', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('last_updated_time', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('created_time', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal(u'core', ['SearchTopic'])

        # Adding model 'Article'
        db.create_table(u'core_article', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('url', self.gf('django.db.models.fields.CharField')(unique=True, max_length=500)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=250)),
            ('description', self.gf('django.db.models.fields.CharField')(max_length=1000)),
            ('pub_date', self.gf('django.db.models.fields.DateTimeField')()),
            ('last_updated_time', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('created_time', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal(u'core', ['Article'])

    def backwards(self, orm):
        # Deleting model 'AppUser'
        db.delete_table(u'core_appuser')

        # Deleting model 'Topic'
        db.delete_table(u'core_topic')

        # Deleting model 'SearchTopic'
        db.delete_table(u'core_searchtopic')

        # Deleting model 'Article'
        db.delete_table(u'core_article')

    models = {
        u'core.appuser': {
            'Meta': {'object_name': 'AppUser'},
            'created_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'fb_access_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
            'fb_user_id': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'full_name': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
            'last_updated_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        u'core.article': {
            'Meta': {'object_name': 'Article'},
            'created_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'pub_date': ('django.db.models.fields.DateTimeField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
            'url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '500'})
        },
        u'core.searchtopic': {
            'Meta': {'object_name': 'SearchTopic'},
            'created_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_master': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_updated_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Topic']"})
        },
        u'core.topic': {
            'Meta': {'object_name': 'Topic'},
            'created_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
        }
    }

    complete_apps = ['core']
| UTF-8 | Python | false | false | 7,467 | py | 8 | 0001_initial.py | 5 | 0.582697 | 0.573323 | 0 | 121 | 60.719008 | 118 |
evaseemefly/gsconfig | 4,269,197,513,147 | 482a819837f765d2997d3f0ba74e7a677340fe5f | 64bf1b41a05673b3fe64f9a62619975d88bb5410 | /src/geoserver/customer_base.py | bbb84292a56f7c67daa4bbf9d646e571cbed2ecd | ["MIT"] | permissive | https://github.com/evaseemefly/gsconfig | ef1992dafe13268182acbf9a8b33d0e86b8702b6 | a10900bd256018edf00cd1b508a0d1eb6c3ecba6 | refs/heads/master | 2021-02-11T18:47:06.512853 | 2020-03-31T09:36:09 | 2020-03-31T09:36:20 | 244,520,389 | 4 | 1 | MIT | true | 2020-03-29T13:48:46 | 2020-03-03T02:12:25 | 2020-03-29T13:35:40 | 2020-03-29T13:48:45 | 1,877 | 1 | 1 | 0 | Python | false | false |
from catalog import Catalog
# TODO:[-] 20-03-31 Note: there is a problem here. Because the gsconfig source code was
# modified, this import actually refers to the gsconfig (geoserver) package installed in
# the system environment.
from geoserver.workspace import Workspace
# from workspace import Workspace


class BaseCatalog:
    '''
    Parent class that all customer subclasses must inherit from
    '''

    def __init__(self, cat: Catalog, work_space: str):
        '''
        @param cat: Catalog instance
        @param work_space: workspace name
        '''
        self.cat = cat
        self.work_space = Workspace(cat, work_space)
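To make the intended use of BaseCatalog concrete, here is a minimal hypothetical subclass; CoverageCatalog and list_layer_names are invented for illustration, and get_layers() assumes the usual gsconfig Catalog API rather than anything defined in this file.

class CoverageCatalog(BaseCatalog):
    '''Hypothetical customer subclass bound to a single workspace.'''

    def list_layer_names(self):
        # self.cat and self.work_space are prepared by BaseCatalog.__init__
        return [layer.name for layer in self.cat.get_layers()]

# Possible wiring (illustrative only):
#   cat = Catalog("http://localhost:8080/geoserver/rest", "admin", "geoserver")
#   CoverageCatalog(cat, "my_workspace").list_layer_names()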
| UTF-8 | Python | false | false | 585 | py | 18 | customer_base.py | 12 | 0.637895 | 0.625263 | 0 | 17 | 26.823529 | 86 |
xuelei-code/test | 10,264,971,874,950 | 13cb93898351bd4b6c92ce89b852ee7b19809f79 | a4ff9458cbf1b69c2cc85e522c2f23fea34252f9 | /dataless.py | 3ab406de3b2e0764058ede8e23b70dc91c4ae703 | [] | no_license | https://github.com/xuelei-code/test | 06ace35f19375e728aeba857c5767019e80e78f4 | 0796565ce0a93ed600ca4346e628800e0d4623ac | refs/heads/master | 2020-11-26T23:40:09.485007 | 2019-12-20T09:22:37 | 2019-12-20T09:22:37 | 229,232,123 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 23 15:48:47 2018

@author: lei
"""
import obspy
from obspy import read, read_inventory
from os import listdir
from obspy.core import UTCDateTime

# Read in the reference (event) time
t0 = UTCDateTime("2018-04-22T16:30:00.000")
# Read in the instrument response file
inv = read_inventory('F:/work/mycode/test/FJ.dataless.xml')
pre_filt = [0.001, 0.1, 10, 20]
# Read in the waveform data
filepath = 'F:/work/mycode/test/mseed'
filename_list = listdir(filepath)
for filename in filename_list:
    st = read(filepath + '/' + filename, starttime=t0 - 60, endtime=t0 + 300)
    # the corresponding response is included in ObsPy as a StationXML file
    # the routine automatically picks the correct response for each trace
    # define a filter band to prevent amplifying noise during the deconvolution
    re_filt = (0.005, 0.1, 10, 11)
    Vst = st.remove_response(inventory=inv, output='VEL', pre_filt=pre_filt)
    Ast = st.remove_response(inventory=inv, output='ACC', pre_filt=pre_filt)
    # Vst.plot()
    # Ast.plot()
    tr = Vst[0]
    print(tr.stats)
| UTF-8 | Python | false | false | 1,064 | py | 10 | dataless.py | 7 | 0.690661 | 0.633268 | 0 | 35 | 28.4 | 80 |
kishansharma3012/HistoNet | 2,602,750,187,633 | c4f2bf91df7e9ff0f59a7956427e802bb7263a46 | cf377c54bd6ea649703923cce924694e3747168b | /code/train.py | 3428603e403fa63b6725b3742356ae9c3b0de035 | [] | no_license | https://github.com/kishansharma3012/HistoNet | c2424028de90101f80b1e269f0e54855d355b4bc | 62bb9120027dfe13b0968af59ad49b0bf57ed836 | refs/heads/master | 2020-08-12T16:05:45.130289 | 2020-05-02T20:29:26 | 2020-05-02T20:29:26 | 214,796,600 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import lasagne
import pickle
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.set_cmap('jet')
import sys,os,time,random
import numpy as np
import theano
import theano.tensor as T
from data_utils import import_data, import_data_dsn
seed = 0
random.seed(seed)
np.random.seed(seed)
lasagne.random.set_rng(np.random.RandomState(seed))
ef = 1024.0
def evaluate_histonet(dataset_image, dataset_countmap, dataset_count, dataset_hist, Hist_wt, test_op, data_mean, loss_list,\
visualize = False, path = 'Results/', loss_name = 'w_L1', Loss_wt= [0.5, 0.5,], num_bins = 8):
"""
Function: Evaluate HistoNet network performance on a dataset (For validation and Testing)
Input:
dataset_image : Input images
dataset_countmap: Gt Redundant countmaps
dataset_count: Gt object counts
dataset_hist: Gt object size distribution histograms
Hist_wt: Weights for L1 loss between predicted and target size histogram
test_op: Operation for extracting output from network for a given input during test and validation(deterministic)
data_mean: Mean image of training dataset
loss_list: list of different losses (only for initializing the test loss history list)
visualize: Boolean (to visualize the test results)
path : path of the result folder
loss_name: Name of the L1 loss for histogram (Weighted L1 loss - w_L1 or L1 loss - L1)
Loss_wt: Weights for KL divergence loss and L1 loss for histogram
num_bins: number of the bins of histogram
Output:
test_loss_history: list of lists of different losses [loss_total, loss_count, loss_pix, loss_kl, loss_l1, loss_l1_temp, loss_reg (0 for the test)]
gt_count: list of object count in the test samples
pred_count: list of predicted count in the test samples
"""
test_loss_history = [[] for _ in range(len(loss_list))]
gt_count_list = []
pred_count_list = []
batchsize = 1
for i in range(0,len(dataset_image), batchsize):
pred_countmap, pred_hist = test_op(dataset_image, range(i,i+batchsize))
if visualize:
visualize_HistoNet_result(path, i, dataset_image[i], dataset_countmap[i], dataset_count[i], dataset_hist[i], \
pred_countmap, pred_hist, Hist_wt, data_mean, num_bins)
err_pix= np.abs(pred_countmap - dataset_countmap[i:i+batchsize]).mean(axis=(2,3))[0][0]
pred_count = (pred_countmap/(ef)).sum(axis=(1,2,3))
err_count = np.abs((dataset_count[i:i+batchsize]/ef).sum()-pred_count)[0]
y_shape = pred_hist.shape[0]
gt_hist = dataset_hist[i:i+batchsize]
p_prob = pred_hist/pred_hist.sum(axis = 1, keepdims=True) + (1e-6)
p_prob1 = p_prob/p_prob.sum(axis =1, keepdims = True)
t_prob = gt_hist/gt_hist.sum(axis = 1, keepdims=True) + (1e-6)
t_prob1 = t_prob/t_prob.sum(axis = 1, keepdims =True)
kl = (t_prob1*np.log((t_prob1)/(p_prob1)))
err_kl = kl.sum()/y_shape
if loss_name == 'w_L1':
err_l1 = (Hist_wt*np.abs(pred_hist - gt_hist)).sum()/y_shape
err_l1_temp = np.abs(pred_hist - gt_hist).sum()/y_shape
elif loss_name == 'L1':
err_l1 = np.abs(pred_hist - gt_hist).sum()/y_shape
err_l1_temp = (Hist_wt*np.abs(pred_hist - gt_hist)).sum()/y_shape
err_total = Loss_wt[0]*err_kl + Loss_wt[1]*err_l1 + err_pix
test_loss_history = update_loss_history(test_loss_history, [err_total, err_count, err_pix, err_kl, err_l1, err_l1_temp, 0.0])
gt_count_list.append((dataset_countmap[i:i+batchsize]/ef).sum())
pred_count_list.append(pred_count[0])
return test_loss_history, gt_count_list, pred_count_list
def evaluate_histonet_dsn(dataset_image, dataset_countmap, dataset_count, dataset_hist, dataset_hist_dsn1, dataset_hist_dsn2, Hist_wt, Hist_wt_dsn1, Hist_wt_dsn2, test_op, data_mean, loss_list,\
visualize = False, path = 'Results/', loss_name = 'w_L1', Loss_wt= [0.5, 0.5,], num_bins = [2,4,8]):
"""
Function: Evaluate HistoNet network performance on a dataset (For validation and Testing)
Input:
dataset_image : Input images
dataset_countmap: Gt Redundant countmaps
dataset_count: Gt object counts
dataset_hist: Gt object size distribution histograms
dataset_hist_dsn1: Gt object size distribution histograms dsn1
dataset_hist_dsn2: Gt object size distribution histograms dsn2
Hist_wt: Weights for L1 loss between predicted and target size histogram
Hist_wt_dsn1: Weights for L1 loss between predicted and target size histogram dsn1
Hist_wt_dsn2: Weights for L1 loss between predicted and target size histogram dsn2
test_op: Operation for extracting output from network for a given input during test and validation(deterministic)
data_mean: Mean image of training dataset
loss_list: list of different losses (only for initializing the test loss history list)
visualize: Boolean (to visualize the test results)
path : path of the result folder
loss_name: Name of the L1 loss for histogram (Weighted L1 loss - w_L1 or L1 loss - L1)
Loss_wt: Weights for KL divergence loss and L1 loss for histogram
num_bins: list of number of bins of histogram [early layer output hist size, middle layer output hist size, final output hist size]
Output:
test_loss_history: list of lists of different losses [loss_total, loss_count, loss_pix, loss_kl, loss_l1, loss_l1_temp, loss_reg (0 for the test)]
gt_count: list of object count in the test samples
pred_count: list of predicted count in the test samples
"""
test_loss_history = [[] for _ in range(len(loss_list))]
gt_count_list = []
pred_count_list = []
batchsize = 1
for i in range(0,len(dataset_image), batchsize):
pred_countmap, pred_hist, pred_hist_dsn1, pred_hist_dsn2 = test_op(dataset_image, range(i,i+batchsize))
if visualize:
visualize_HistoNet_DSN_result(path, i, dataset_image[i], dataset_countmap[i], dataset_count[i], dataset_hist[i], dataset_hist_dsn1[i], dataset_hist_dsn2[i], \
pred_countmap, pred_hist, pred_hist_dsn1, pred_hist_dsn2, Hist_wt, Hist_wt_dsn1, Hist_wt_dsn2, data_mean, num_bins)
err_pix= np.abs(pred_countmap - dataset_countmap[i:i+batchsize]).mean(axis=(2,3))[0][0]
pred_count = (pred_countmap/(ef)).sum(axis=(1,2,3))
err_count = np.abs((dataset_count[i:i+batchsize]/ef).sum()-pred_count)[0]
y_shape = pred_hist.shape[0]
gt_hist = dataset_hist[i:i+batchsize]
p_prob = pred_hist/pred_hist.sum(axis = 1, keepdims=True) + (1e-6)
p_prob1 = p_prob/p_prob.sum(axis =1, keepdims = True)
t_prob = gt_hist/gt_hist.sum(axis = 1, keepdims=True) + (1e-6)
t_prob1 = t_prob/t_prob.sum(axis = 1, keepdims =True)
kl = (t_prob1*np.log((t_prob1)/(p_prob1)))
err_kl = kl.sum()/y_shape
# KL Div loss - DSN1
y_shape_dsn1 = pred_hist_dsn1.shape[0]
gt_hist_dsn1 = dataset_hist_dsn1[i:i+batchsize]
p_prob_dsn1 = pred_hist_dsn1/(pred_hist_dsn1.sum(axis = 1, keepdims=True) + 1e-6) + (1e-6)
p_prob1_dsn1 = p_prob_dsn1/p_prob_dsn1.sum(axis =1, keepdims = True)
t_prob_dsn1 = gt_hist_dsn1/gt_hist_dsn1.sum(axis = 1, keepdims=True) + (1e-6)
t_prob1_dsn1 = t_prob_dsn1/t_prob_dsn1.sum(axis = 1, keepdims =True)
kl_dsn1 = (t_prob1_dsn1*np.log((t_prob1_dsn1)/(p_prob1_dsn1)))
err_kl_dsn1 = kl_dsn1.sum()/y_shape_dsn1
# KL Div loss - DSN2
y_shape_dsn2 = pred_hist_dsn2.shape[0]
gt_hist_dsn2 = dataset_hist_dsn2[i:i+batchsize]
p_prob_dsn2 = pred_hist_dsn2/(pred_hist_dsn2.sum(axis = 1, keepdims=True) + 1e-6) + (1e-6)
p_prob1_dsn2 = p_prob_dsn2/p_prob_dsn2.sum(axis =1, keepdims = True)
t_prob_dsn2 = gt_hist_dsn2/gt_hist_dsn2.sum(axis = 1, keepdims=True) + (1e-6)
t_prob1_dsn2 = t_prob_dsn2/t_prob_dsn2.sum(axis = 1, keepdims =True)
kl_dsn2 = (t_prob1_dsn2*np.log((t_prob1_dsn2)/(p_prob1_dsn2)))
err_kl_dsn2 = kl_dsn2.sum()/y_shape_dsn2
if loss_name == 'w_L1':
err_l1 = (Hist_wt*np.abs(pred_hist - gt_hist)).sum()/y_shape
err_l1_temp = np.abs(pred_hist - gt_hist).sum()/y_shape
err_l1_dsn1 = (Hist_wt_dsn1*np.abs(pred_hist_dsn1 - gt_hist_dsn1)).sum()/y_shape_dsn1
err_l1_temp_dsn1 = np.abs(pred_hist_dsn1 - gt_hist_dsn1).sum()/y_shape_dsn1
err_l1_dsn2 = (Hist_wt_dsn2*np.abs(pred_hist_dsn2 - gt_hist_dsn2)).sum()/y_shape_dsn2
err_l1_temp_dsn2 = np.abs(pred_hist_dsn2 - gt_hist_dsn2).sum()/y_shape_dsn2
elif loss_name == 'L1':
err_l1 = np.abs(pred_hist - gt_hist).sum()/y_shape
err_l1_temp = (Hist_wt*np.abs(pred_hist - gt_hist)).sum()/y_shape
err_l1_dsn1 = np.abs(pred_hist_dsn1 - gt_hist_dsn1).sum()/y_shape_dsn1
err_l1_temp_dsn1 = (Hist_wt_dsn1*np.abs(pred_hist_dsn1 - gt_hist_dsn1)).sum()/y_shape_dsn1
err_l1_dsn2 = np.abs(pred_hist_dsn2 - gt_hist_dsn2).sum()/y_shape_dsn2
err_l1_temp_dsn2 = (Hist_wt_dsn2*np.abs(pred_hist_dsn2 - gt_hist_dsn2)).sum()/y_shape_dsn2
err_total = Loss_wt[0]*(err_kl + err_kl_dsn1 + err_kl_dsn2) + Loss_wt[1]*(err_l1 + err_l1_dsn1 + err_l1_dsn2) + err_pix
test_loss_history = update_loss_history(test_loss_history, [err_total, err_count, err_pix, err_kl, err_kl_dsn1, err_kl_dsn2,\
err_l1, err_l1_dsn1, err_l1_dsn2, err_l1_temp, err_l1_temp_dsn1, err_l1_temp_dsn2, 0.0])
gt_count_list.append((dataset_countmap[i:i+batchsize]/ef).sum())
pred_count_list.append(pred_count[0])
return test_loss_history, gt_count_list, pred_count_list
def save_network(net, file_name, directory):
"""
Function: save network weights
Input:
net : network
file_name: name of the model file
directory: Logging directory for saving model weights
"""
np.savez( directory +'/' + file_name, *lasagne.layers.get_all_param_values(net))
def load_network(net, model_path):
"""
Function: load network weights
Input:
net : network
model_path: path of the model file
"""
with np.load(model_path) as f:
param_values = [f['arr_%d' % i] for i in range(len(f.files))]
lasagne.layers.set_all_param_values(net, param_values)
def update_loss_history(losses_lists, new_values):
for i in range(len(losses_lists)):
losses_lists[i].append(float(new_values[i]))
return losses_lists
def trainer_histonet(net_count, net_hist, dataset_path, loss_list, placeholder_list, epochs, lr_value , \
lr_decay, batch_size, weight_decay_value, root_result, Experiment_name, \
train_op, test_op, loss_name = 'w_L1', Loss_wt = [0.5, 0.5], num_bins = 8, print_every=50):
"""
Function: Training and evaluating HistoNet network
Input:
net_count : count map output from network
net_hist: histogram vector output from network
dataset_path: path to the directory containing train, val and test dataset pickle files
loss_list: list of different losses for count and size histogram prediction
placeholder_list: list of placeholders for input and target
epochs: number of epochs
lr_value: Learning rate
lr_decay: decay rate of Learning rate (0 - 1.0)
batch_size: batch_size for training
weight_decay_value: L2 regularization strength
root_result: directory to training plots, test results
Experiment_name: Name of the experiment
train_op: Operation for extracting output from network for a given input during training(non deterministic)
test_op: Operation for extracting output from network for a given input during test and validation(deterministic)
loss_name: Name of the L1 loss for histogram (Weighted L1 loss - w_L1 or L1 loss - L1)
Loss_wt: Weights for KL divergence loss and L1 loss for histogram
num_bins: number of bins of histogram
print_every: number of iteration after which to show the progress of training losses
"""
# Import Dataset
train_set, val_set, test_set, data_mean = import_data(dataset_path, num_bins)
np_train_dataset_x, np_train_dataset_y, np_train_dataset_c, np_train_dataset_s = train_set[0], train_set[1], train_set[2], train_set[3]
np_val_dataset_x, np_val_dataset_y, np_val_dataset_c, np_val_dataset_s = val_set[0], val_set[1], val_set[2], val_set[3]
np_test_dataset_x, np_test_dataset_y, np_test_dataset_c, np_test_dataset_s = test_set[0], test_set[1], test_set[2], test_set[3]
print("Imported Data !!....")
# Unpack losses and placeholders
lr = theano.shared(np.array(0.0, dtype=theano.config.floatX))
loss_total, loss_count, loss_pix, loss_kl, loss_l1, loss_l1_temp, loss_reg = loss_list
input_var, input_var_ex, target_var, target_var_hist, weight_var_hist = placeholder_list
# Calculate weights for weighted L1 histogram loss
Bins_var = np.linspace(0,200, num_bins + 1)
center_bin_hist = (Bins_var[:-1] + Bins_var[1:])/2
Hist_wt = center_bin_hist/center_bin_hist.sum()
Hist_wt = np.tile(Hist_wt, (batch_size,1)).astype(dtype = np.float64)
# Preparing optimizer
params = lasagne.layers.get_all_params([net_count, net_hist], trainable=True)
updates = lasagne.updates.adam(loss_total, params, learning_rate=lr)
train_fn = theano.function([input_var_ex], [loss_total, loss_count, loss_pix, loss_kl, loss_l1, loss_l1_temp, loss_reg], updates=updates,
givens={input_var:np_train_dataset_x, target_var: np_train_dataset_y, target_var_hist: np_train_dataset_s, weight_var_hist: Hist_wt})
print("Training Function Compiled !!.....")
lr.set_value(lr_value)
best_valid_err = np.inf
datasetlength = len(np_train_dataset_x)
batch_size = 2
datasetlength = 4
print("batch_size", batch_size)
print("lr", lr.eval())
print("datasetlength",datasetlength)
training_plot_path = os.path.join(root_result, 'Training_plots')
model_dir = os.path.join(root_result, Experiment_name)
if not os.path.exists(root_result):
os.mkdir(root_result)
os.mkdir(training_plot_path)
os.mkdir(model_dir)
# Resetting Training and Validation loss per epoch history
train_loss_epoch_history = [[] for _ in range(len(loss_list))]
val_loss_epoch_history = [[] for _ in range(len(loss_list))]
for epoch in range(epochs):
train_loss_history = [[] for _ in range(len(loss_list))]
todo = range(datasetlength)
for i in range(0,datasetlength, batch_size):
ex = todo[i:i+batch_size]
err_total, err_count, err_pix, err_kl, err_l1, err_l1_temp, err_reg = train_fn(ex)
if i%print_every == 0 :
print("Epoch :", epoch," | Iteration : ", i ,"| Total_Loss :",np.around(err_total,2), \
"| Pix Loss :",np.around(err_pix,2), "| Count_Loss : ",np.around(err_count.mean(),2), \
"| kl_Loss:", np.around(err_kl,2), "| l1_Loss:", np.around(err_l1,2), "| l1_Loss_temp:", np.around(err_l1_temp,2), \
"| reg_Loss:", np.around(err_reg,2), "| Learning_rate:", np.around(lr.get_value(),5))
# Updating Loss history
train_loss_history = update_loss_history(train_loss_history, [err_total, err_count, err_pix, err_kl, err_l1, err_l1_temp, err_reg])
# Learning rate decay
lr.set_value(lasagne.utils.floatX(lr.get_value() * lr_decay))
val_loss_history, _, _ = evaluate_histonet(np_val_dataset_x[:4], np_val_dataset_y[:4], np_val_dataset_c[:4], np_val_dataset_s[:4], Hist_wt[0,:], test_op, \
data_mean, loss_list, loss_name = loss_name, Loss_wt = Loss_wt, num_bins= num_bins)
# Updating Loss Epoch history
train_loss_epoch_history = update_loss_history(train_loss_epoch_history, [np.mean(train_loss_history[i]) for i in range(len(train_loss_history))])
val_loss_epoch_history = update_loss_history(val_loss_epoch_history, [np.mean(val_loss_history[i]) for i in range(len(val_loss_history))])
# plot results
plot_results(train_loss_epoch_history[0], val_loss_epoch_history[0], training_plot_path,'Total_loss',1)
plot_results(train_loss_epoch_history[1], val_loss_epoch_history[1], training_plot_path,'Count_loss',2)
plot_results(train_loss_epoch_history[2], val_loss_epoch_history[2], training_plot_path,'Pix_loss',3)
plot_results(train_loss_epoch_history[3], val_loss_epoch_history[3], training_plot_path,'kl_loss',4)
plot_results(train_loss_epoch_history[4], val_loss_epoch_history[4], training_plot_path,'l1_loss',5)
plot_results(train_loss_epoch_history[5], val_loss_epoch_history[5], training_plot_path,'l1_loss_temp',6)
plot_results(train_loss_epoch_history[6], val_loss_epoch_history[6], training_plot_path,'reg_loss',7)
print("Epoch : ",epoch, "| Train_Total_Loss :", np.around(train_loss_epoch_history[0][-1],2), \
"| Val_Total_Loss :", np.around(val_loss_epoch_history[0][-1],2), \
"| Train_Count Loss:",np.around(train_loss_epoch_history[1][-1],2),\
"| Val_Count Loss:",np.around(val_loss_epoch_history[1][-1],2),\
"| Train_Pix_loss:",np.around(train_loss_epoch_history[2][-1],2),\
"| Val_Pix_loss:",np.around(val_loss_epoch_history[2][-1],2),\
"| Train KL_Loss:",np.around(train_loss_epoch_history[3][-1],2), \
"| Val KL_Loss:",np.around(val_loss_epoch_history[3][-1],2), \
"| Train L1_Loss:",np.around(train_loss_epoch_history[4][-1],2), \
"| Val L1_Loss:",np.around(val_loss_epoch_history[4][-1],2))
save_network([net_count, net_hist], 'model_' +str(epoch) + '.npz', model_dir)
# saving best model
if (val_loss_epoch_history[0][-1] < best_valid_err):
best_valid_err = val_loss_epoch_history[0][-1]
save_network([net_count, net_hist], 'model_best.npz', model_dir)
Test_directory = root_result + '/Test_results/'
if not os.path.exists(Test_directory):
os.mkdir(Test_directory)
# Loading best model
load_network([net_count, net_hist], model_dir + '/model_best.npz')
test_loss_history, _, _ = evaluate_histonet(np_test_dataset_x, np_test_dataset_y, np_test_dataset_c, np_test_dataset_s, Hist_wt[0,:], \
test_op, data_mean, loss_list, visualize = True, path = Test_directory, loss_name = loss_name, Loss_wt = Loss_wt, num_bins= num_bins)
# saving results for test dataset
Error_file = open(model_dir + '/Test_result_Summary.txt', "w")
Error_file.write('\n Total_count_Mean abs Error :' + str(np.mean(test_loss_history[1])))
Error_file.write('\n Total_kl_Mean abs Error :' + str(np.mean(test_loss_history[3])))
Error_file.write('\n Total_l1_Mean abs Error :' + str(np.mean(test_loss_history[4])))
Error_file.write('\n Total_l1_ temp Mean abs Error :' + str(np.mean(test_loss_history[5])))
Error_file.close()
def trainer_histonet_dsn(net_count, net_hist, net_hist_dsn1, net_hist_dsn2, dataset_path, loss_list, placeholder_list, epochs, lr_value , \
lr_decay, batch_size, weight_decay_value, root_result, Experiment_name, \
train_op, test_op, loss_name = 'w_L1', Loss_wt = [0.5, 0.5], num_bins = [2,4,8], print_every=50):
"""
Function: Training and evaluating HistoNet DSN network
Input:
net_count : count map output from network
net_hist: histogram vector output of size num_bins[2] from the network
net_hist_dsn1: histogram vector output of size num_bins[0] from the early part of the network
net_hist_dsn2: histogram vector output of size num_bins[1] from the middle part of the network
dataset_path: path to the directory containing train, val and test dataset pickle files
loss_list: list of different losses for count and size histogram prediction
placeholder_list: list of placeholders for input and target
epochs: number of epochs
lr_value: Learning rate
lr_decay: decay rate of Learning rate (0 - 1.0)
batch_size: batch_size for training
weight_decay_value: L2 regularization strength
root_result: directory to training plots, test results
Experiment_name: Name of the experiment
train_op: Operation for extracting output from network for a given input during training(non deterministic)
test_op: Operation for extracting output from network for a given input during test and validation(deterministic)
loss_name: Name of the L1 loss for histogram (Weighted L1 loss - w_L1 or L1 loss - L1)
Loss_wt: Weights for KL divergence loss and L1 loss for histogram
num_bins: list of number of bins of histogram [early layer output hist size, middle layer output hist size, final output hist size]
print_every: number of iteration after which to show the progress of training losses
"""
# Import Dataset
train_set, val_set, test_set, data_mean = import_data_dsn(dataset_path, num_bins)
np_train_dataset_x, np_train_dataset_y, np_train_dataset_c, np_train_dataset_s_dsn1, np_train_dataset_s_dsn2, np_train_dataset_s = train_set[0], train_set[1], train_set[2], train_set[3], train_set[4], train_set[5]
np_val_dataset_x, np_val_dataset_y, np_val_dataset_c, np_val_dataset_s_dsn1, np_val_dataset_s_dsn2, np_val_dataset_s = val_set[0], val_set[1], val_set[2], val_set[3], val_set[4], val_set[5]
np_test_dataset_x, np_test_dataset_y, np_test_dataset_c, np_test_dataset_s_dsn1, np_test_dataset_s_dsn2, np_test_dataset_s = test_set[0], test_set[1], test_set[2], test_set[3], test_set[4], test_set[5]
print("Imported Data !!....")
# Unpack losses and placeholders
lr = theano.shared(np.array(0.0, dtype=theano.config.floatX))
loss_total, loss_count, loss_pix, loss_kl, loss_kl_dsn1, loss_kl_dsn2, loss_l1, loss_l1_dsn1, loss_l1_dsn2,\
loss_l1_temp, loss_l1_temp_dsn1, loss_l1_temp_dsn2, loss_reg = loss_list
input_var, input_var_ex, target_var, target_var_hist, target_var_hist_dsn1, target_var_hist_dsn2, \
weight_var_hist, weight_var_hist_dsn1, weight_var_hist_dsn2 = placeholder_list
# Calculate weights for weighted L1 histogram loss num_bins[0]
Bins_var = np.linspace(0,200, num_bins[0] + 1)
center_bin_hist = (Bins_var[:-1] + Bins_var[1:])/2
Hist_wt = center_bin_hist/center_bin_hist.sum()
Hist_wt_dsn1 = np.tile(Hist_wt, (batch_size,1)).astype(dtype = np.float64)
# Calculate weights for weighted L1 histogram loss num_bins[1]
Bins_var = np.linspace(0,200, num_bins[1] + 1)
center_bin_hist = (Bins_var[:-1] + Bins_var[1:])/2
Hist_wt = center_bin_hist/center_bin_hist.sum()
Hist_wt_dsn2 = np.tile(Hist_wt, (batch_size,1)).astype(dtype = np.float64)
# Calculate weights for weighted L1 histogram loss num_bins[2]
Bins_var = np.linspace(0,200, num_bins[2] + 1)
center_bin_hist = (Bins_var[:-1] + Bins_var[1:])/2
Hist_wt = center_bin_hist/center_bin_hist.sum()
Hist_wt = np.tile(Hist_wt, (batch_size,1)).astype(dtype = np.float64)
# Preparing optimizer
params = lasagne.layers.get_all_params([net_count, net_hist, net_hist_dsn1, net_hist_dsn2], trainable=True)
updates = lasagne.updates.adam(loss_total, params, learning_rate=lr)
train_fn = theano.function([input_var_ex], [loss_total, loss_count, loss_pix, loss_kl, loss_kl_dsn1, loss_kl_dsn2, \
loss_l1, loss_l1_dsn1, loss_l1_dsn2, loss_l1_temp, loss_l1_temp_dsn1, loss_l1_temp_dsn2, loss_reg], updates=updates,
givens={input_var:np_train_dataset_x, target_var: np_train_dataset_y, target_var_hist: np_train_dataset_s, weight_var_hist: Hist_wt, \
target_var_hist_dsn1: np_train_dataset_s_dsn1, weight_var_hist_dsn1: Hist_wt_dsn1,\
target_var_hist_dsn2: np_train_dataset_s_dsn2, weight_var_hist_dsn2: Hist_wt_dsn2 })
print("Training Function Compiled !!.....")
lr.set_value(lr_value)
best_valid_err = np.inf
#dataset_length = len(np_train_dataset_x)
dataset_length = 4
print("batch_size", batch_size)
print("lr", lr.eval())
print("datasetlength",dataset_length)
training_plot_path = os.path.join(root_result, 'Training_plots')
model_dir = os.path.join(root_result, Experiment_name)
if not os.path.exists(root_result):
os.mkdir(root_result)
os.mkdir(training_plot_path)
os.mkdir(model_dir)
# Resetting Training and Validation loss per epoch history
train_loss_epoch_history =[[] for _ in range(len(loss_list))]
val_loss_epoch_history = [[] for _ in range(len(loss_list))]
for epoch in range(epochs):
train_loss_history = [[] for _ in range(len(loss_list))]
todo = range(dataset_length)
for i in range(0,dataset_length, batch_size):
ex = todo[i:i+batch_size]
err_total, err_count, err_pix, err_kl, err_kl_dsn1, err_kl_dsn2, err_l1, err_l1_dsn1, err_l1_dsn2,\
err_l1_temp, err_l1_temp_dsn1, err_l1_temp_dsn2, err_reg = train_fn(ex)
if i%print_every == 0 :
print("Epoch :", epoch," | Iteration : ", i ,"| Total_Loss :",np.around(err_total,2), \
"| Pix Loss :",np.around(err_pix,2), "| Count_Loss : ",np.around(err_count.mean(),2), \
"| kl_Loss:", np.around(err_kl,2), "| l1_Loss:", np.around(err_l1,2), "| l1_Loss_temp:", np.around(err_l1_temp,2), \
"| kl_Loss dsn1:", np.around(err_kl_dsn1,2), "| l1_Loss dsn1:", np.around(err_l1_dsn1,2), "| l1_Loss_temp dsn1:", np.around(err_l1_temp_dsn1,2), \
"| kl_Loss dsn2:", np.around(err_kl_dsn2,2), "| l1_Loss dsn2:", np.around(err_l1_dsn2,2), "| l1_Loss_temp dsn2:", np.around(err_l1_temp_dsn2,2), \
"| reg_Loss:", np.around(err_reg,2), "| Learning_rate:", np.around(lr.get_value(),5))
# Updating Loss history
train_loss_history = update_loss_history(train_loss_history, [err_total, err_count, err_pix, err_kl, err_kl_dsn1, err_kl_dsn2,\
err_l1, err_l1_dsn1, err_l1_dsn2, err_l1_temp, err_l1_temp_dsn1, err_l1_temp_dsn2, err_reg])
# Learning rate decay
lr.set_value(lasagne.utils.floatX(lr.get_value() * lr_decay))
val_loss_history, _, _ = evaluate_histonet_dsn(np_val_dataset_x[:4], np_val_dataset_y[:4], np_val_dataset_c[:4], np_val_dataset_s[:4], np_val_dataset_s_dsn1[:4], np_val_dataset_s_dsn2[:4], Hist_wt[0,:], Hist_wt_dsn1[0,:], Hist_wt_dsn2[0,:], test_op, \
data_mean, loss_list, loss_name = loss_name, Loss_wt = Loss_wt, num_bins= num_bins)
# Updating Loss Epoch history
train_loss_epoch_history = update_loss_history(train_loss_epoch_history, [np.mean(train_loss_history[i]) for i in range(len(train_loss_history))])
val_loss_epoch_history = update_loss_history(val_loss_epoch_history, [np.mean(val_loss_history[i]) for i in range(len(val_loss_history))])
# plot results
plot_results(train_loss_epoch_history[0], val_loss_epoch_history[0], training_plot_path,'Total_loss',1)
plot_results(train_loss_epoch_history[1], val_loss_epoch_history[1], training_plot_path,'Count_loss',2)
plot_results(train_loss_epoch_history[2], val_loss_epoch_history[2], training_plot_path,'Pix_loss',3)
plot_results(train_loss_epoch_history[3], val_loss_epoch_history[3], training_plot_path,'kl_loss',4)
plot_results(train_loss_epoch_history[4], val_loss_epoch_history[4], training_plot_path,'kl_loss dsn1',5)
plot_results(train_loss_epoch_history[5], val_loss_epoch_history[5], training_plot_path,'kl_loss dsn2',6)
plot_results(train_loss_epoch_history[6], val_loss_epoch_history[6], training_plot_path,'l1_loss',7)
plot_results(train_loss_epoch_history[7], val_loss_epoch_history[7], training_plot_path,'l1_loss dsn1',8)
plot_results(train_loss_epoch_history[8], val_loss_epoch_history[8], training_plot_path,'l1_loss dsn2',9)
plot_results(train_loss_epoch_history[9], val_loss_epoch_history[9], training_plot_path,'l1_loss_temp',10)
plot_results(train_loss_epoch_history[10], val_loss_epoch_history[10], training_plot_path,'l1_loss_temp dsn1',11)
plot_results(train_loss_epoch_history[11], val_loss_epoch_history[11], training_plot_path,'l1_loss_temp dsn2',12)
plot_results(train_loss_epoch_history[12], val_loss_epoch_history[12], training_plot_path,'reg_loss',13)
print("Epoch : ",epoch, "| Train_Total_Loss :", np.around(train_loss_epoch_history[0][-1],2),\
"| Val_Total_Loss :", np.around(val_loss_epoch_history[0][-1],2),\
"| Train_Count Loss:",np.around(train_loss_epoch_history[1][-1],2),\
"| Val_Count Loss:",np.around(val_loss_epoch_history[1][-1],2),\
"| Train_Pix_loss:",np.around(train_loss_epoch_history[2][-1],2),\
"| Val_Pix_loss:",np.around(val_loss_epoch_history[2][-1],2),\
"| Train KL_Loss:",np.around(train_loss_epoch_history[3][-1],2),\
"| Val KL_Loss:",np.around(val_loss_epoch_history[3][-1],2),\
"| Train L1_Loss:",np.around(train_loss_epoch_history[6][-1],2),\
"| Val L1_Loss:",np.around(val_loss_epoch_history[6][-1],2),\
"| Train KL dsn1_Loss:",np.around(train_loss_epoch_history[4][-1],2),\
"| Val KL_dsn1 Loss:",np.around(val_loss_epoch_history[4][-1],2),\
"| Train L1 dsn1_Loss:",np.around(train_loss_epoch_history[7][-1],2),\
"| Val L1_dsn1 Loss:",np.around(val_loss_epoch_history[7][-1],2),\
"| Train KL_dsn2_Loss:",np.around(train_loss_epoch_history[5][-1],2),\
"| Val KL_dsn2_Loss:",np.around(val_loss_epoch_history[5][-1],2),\
"| Train L1_dsn2_Loss:",np.around(train_loss_epoch_history[8][-1],2),\
"| Val L1_dsn2_Loss:",np.around(val_loss_epoch_history[8][-1],2))
save_network([net_count, net_hist, net_hist_dsn1, net_hist_dsn2], 'model_' +str(epoch) + '.npz', model_dir)
# saving best model
if (val_loss_epoch_history[0][-1] < best_valid_err):
best_valid_err = val_loss_epoch_history[0][-1]
save_network([net_count, net_hist, net_hist_dsn1, net_hist_dsn2], 'model_best.npz', model_dir)
Test_directory = root_result + '/Test_results/'
if not os.path.exists(Test_directory):
os.mkdir(Test_directory)
# Loading best model
load_network([net_count, net_hist, net_hist_dsn1, net_hist_dsn2], model_dir + '/model_best.npz')
test_loss_history, _, _ = evaluate_histonet_dsn(np_test_dataset_x, np_test_dataset_y, np_test_dataset_c, np_test_dataset_s, np_test_dataset_s_dsn1, np_test_dataset_s_dsn2,\
Hist_wt[0,:], Hist_wt_dsn1[0,:], Hist_wt_dsn2[0,:], test_op, data_mean, loss_list, visualize = True, path = Test_directory, loss_name = loss_name, Loss_wt = Loss_wt, num_bins= num_bins)
# saving results for test dataset
Error_file = open(model_dir + '/Test_result_Summary.txt', "w")
Error_file.write('\n Total_count_Mean abs Error :' + str(np.mean(test_loss_history[1])))
Error_file.write('\n Total_kl_Mean abs Error :' + str(np.mean(test_loss_history[3])))
Error_file.write('\n Total_l1_Mean abs Error :' + str(np.mean(test_loss_history[6])))
Error_file.write('\n Total_l1_ temp Mean abs Error :' + str(np.mean(test_loss_history[9])))
Error_file.close()
def loss_func_histonet(net_count, net_hist, input_var, input_var_ex, reg, loss_name = 'w_L1', Loss_wt = [0.5, 0.5]):
"""
Function: Defining Loss functions for training HistoNet network
Input:
net_count : count map output from network
net_hist: histogram vector output from network
input_var: Place holder for input to the network
input_var_ex: Place holder for index for data (input, target)
reg: Regularization strength
loss_name: Name of the L1 loss for histogram (Weighted L1 loss - w_L1 or L1 loss - L1)
Loss_wt: Weights for KL divergence loss and L1 loss for histogram
Output:
loss_list: list of different losses for count and size histogram prediction
placeholder_list: list of placeholders for input and target
train_op: Operation for extracting output from network for a given input during training(non deterministic)
test_op: Operation for extracting output from network for a given input during test and validation(deterministic)
"""
# Training forward pass
prediction_count_map, prediction_hist = lasagne.layers.get_output([net_count, net_hist], deterministic=False)
prediction_count = (prediction_count_map/ef).sum(axis=(2,3))
train_op = theano.function([input_var, input_var_ex], [prediction_count_map, prediction_hist])
# Val/Test forward pass
prediction_count_map_t, prediction_hist_t = lasagne.layers.get_output([net_count, net_hist], deterministic=True)
prediction_count_t = (prediction_count_map_t/ef).sum(axis=(2,3))
test_op = theano.function([input_var, input_var_ex], [prediction_count_map_t, prediction_hist_t])
# Placeholders for target and weights for histogram weighted L1 loss
target_var = T.tensor4('target')
target_var_hist = T.matrix('target_hist')
weight_var_hist = T.matrix('weight_hist')
#Mean Absolute Error is computed between each count of the count map, pixel wise
l1_loss = T.abs_(prediction_count_map - target_var[input_var_ex])
loss_pix = l1_loss.mean()
#Mean Absolute Error is computed for the overall count loss
loss_count = T.abs_(prediction_count - (target_var[input_var_ex]/ef).sum(axis=(2,3))).mean()
#KL DIV LOSS between probability distribution of target and predicted histogram to capture shape of histogram
y_shape = prediction_hist.shape[0]
target_hist = target_var_hist[input_var_ex]
p_prob_1 = prediction_hist/prediction_hist.sum(axis = 1, keepdims=True) + (1e-6)
p_prob = p_prob_1/p_prob_1.sum(axis=1, keepdims = True)
t_prob_1 = target_hist/target_hist.sum(axis = 1, keepdims=True) + (1e-6)
t_prob = t_prob_1/t_prob_1.sum(axis = 1, keepdims = True)
kl= (t_prob*T.log((t_prob)/(p_prob)))
loss_kl = kl.sum()/y_shape
# weighted L1 or L1 loss between predicted and target histogram to capture scale of size histogram
if loss_name == 'L1':
loss_l1 = (T.abs_(prediction_hist - target_hist)).sum()/y_shape
loss_l1_temp = (weight_var_hist*T.abs_(prediction_hist - target_hist)).sum()/y_shape
elif loss_name == 'w_L1':
loss_l1 = (weight_var_hist*T.abs_(prediction_hist - target_hist)).sum()/y_shape
loss_l1_temp = (T.abs_(prediction_hist - target_hist)).sum()/y_shape
# Regularization loss
loss_reg = 0.5*reg*lasagne.regularization.regularize_network_params([net_count, net_hist], lasagne.regularization.l2)
# Total Loss
loss_total = loss_pix + Loss_wt[0]*loss_kl + Loss_wt[1]*loss_l1 + loss_reg
loss_list = [loss_total, loss_count, loss_pix, loss_kl, loss_l1, loss_l1_temp, loss_reg]
placeholder_list = [input_var, input_var_ex, target_var, target_var_hist, weight_var_hist]
return loss_list, placeholder_list, train_op, test_op
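# --- Illustrative sketch (added note, not part of the original training code) ---
# The loss assembled in loss_func_histonet combines a KL term (histogram shape) with a
# (weighted) L1 term (histogram scale). The same arithmetic in plain NumPy, on made-up
# 4-bin histograms for a batch of 2; the toy values and the _demo_ names are hypothetical.
import numpy as np  # already imported at the top; repeated so the sketch is self-contained
_demo_pred = np.array([[2., 1., 0., 1.], [4., 0., 0., 0.]])
_demo_gt = np.array([[1., 1., 1., 1.], [3., 1., 0., 0.]])
_demo_w = np.array([0.1, 0.2, 0.3, 0.4])  # per-bin weights, e.g. normalized bin centres
_p = _demo_pred / _demo_pred.sum(axis=1, keepdims=True) + 1e-6
_p = _p / _p.sum(axis=1, keepdims=True)
_t = _demo_gt / _demo_gt.sum(axis=1, keepdims=True) + 1e-6
_t = _t / _t.sum(axis=1, keepdims=True)
_demo_loss_kl = (_t * np.log(_t / _p)).sum() / _demo_pred.shape[0]  # shape mismatch (KL)
_demo_loss_w_l1 = (_demo_w * np.abs(_demo_pred - _demo_gt)).sum() / _demo_pred.shape[0]  # scale mismatch (w_L1)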
def loss_func_histonet_dsn(net_count, net_hist, net_hist_dsn1, net_hist_dsn2, input_var, input_var_ex, reg, loss_name = 'w_L1', Loss_wt = [0.5, 0.5]):
"""
Function: Defining Loss functions for training HistoNet network
Input:
net_count : count map output from network
net_hist: histogram vector output from network
net_hist_dsn1: histogram vector output from early part of the network
net_hist_dsn2: histogram vector output from middle part of the network
input_var: Place holder for input to the network
input_var_ex: Place holder for index for data (input, target)
reg: Regularization strength
loss_name: Name of the L1 loss for histogram (Weighted L1 loss - w_L1 or L1 loss - L1)
Loss_wt: Weights for KL divergence loss and L1 loss for histogram
Output:
loss_list: list of different losses for count and size histogram prediction
placeholder_list: list of placeholders for input and target
train_op: Operation for extracting output from network for a given input during training(non deterministic)
test_op: Operation for extracting output from network for a given input during test and validation(deterministic)
"""
# Training forward pass
prediction_count_map, prediction_hist, prediction_hist_dsn1, prediction_hist_dsn2 = lasagne.layers.get_output([net_count, net_hist, net_hist_dsn1, net_hist_dsn2], deterministic=False)
prediction_count = (prediction_count_map/ef).sum(axis=(2,3))
train_op = theano.function([input_var, input_var_ex], [prediction_count_map, prediction_hist, prediction_hist_dsn1, prediction_hist_dsn2])
# Val/Test forward pass
prediction_count_map_t, prediction_hist_t, prediction_hist_dsn1_t, prediction_hist_dsn2_t = lasagne.layers.get_output([net_count, net_hist, net_hist_dsn1, net_hist_dsn2], deterministic=True)
prediction_count_t = (prediction_count_map_t/ef).sum(axis=(2,3))
test_op = theano.function([input_var, input_var_ex], [prediction_count_map_t, prediction_hist_t, prediction_hist_dsn1_t, prediction_hist_dsn2_t])
# Placeholders for target and weights for histogram weighted L1 loss
target_var = T.tensor4('target')
target_var_hist = T.matrix('target_hist')
weight_var_hist = T.matrix('weight_hist')
target_var_hist_dsn1 = T.matrix('target_hist_dsn1')
weight_var_hist_dsn1 = T.matrix('weight_hist_dsn1')
target_var_hist_dsn2 = T.matrix('target_hist_dsn2')
weight_var_hist_dsn2 = T.matrix('weight_hist_dsn2')
#Mean Absolute Error is computed between each count of the count map, pixel wise
l1_loss = T.abs_(prediction_count_map - target_var[input_var_ex])
loss_pix = l1_loss.mean()
#Mean Absolute Error is computed for the overall count loss
loss_count = T.abs_(prediction_count - (target_var[input_var_ex]/ef).sum(axis=(2,3))).mean()
#KL DIV LOSS between probability distribution of target and predicted histogram (main objective histogram) to capture shape of histogram
y_shape = prediction_hist.shape[0]
target_hist = target_var_hist[input_var_ex]
p_prob_1 = prediction_hist/(prediction_hist.sum(axis = 1, keepdims=True) + 1e-6) + (1e-6)
p_prob = p_prob_1/p_prob_1.sum(axis=1, keepdims = True)
t_prob_1 = target_hist/target_hist.sum(axis = 1, keepdims=True) + (1e-6)
t_prob = t_prob_1/t_prob_1.sum(axis = 1, keepdims = True)
kl= (t_prob*T.log((t_prob)/(p_prob)))
loss_kl = kl.sum()/y_shape
#KL DIV LOSS - DSN1 between probability distribution of target and predicted histogram DSN1 to capture shape of histogram
target_hist_dsn1 = target_var_hist_dsn1[input_var_ex]
p_prob_1_dsn1 = prediction_hist_dsn1/(prediction_hist_dsn1.sum(axis = 1, keepdims=True) + 1e-6) + (1e-6)
p_prob_dsn1 = p_prob_1_dsn1/p_prob_1_dsn1.sum(axis=1, keepdims = True)
t_prob_1_dsn1 = target_hist_dsn1/target_hist_dsn1.sum(axis = 1, keepdims=True) + (1e-6)
t_prob_dsn1 = t_prob_1_dsn1/t_prob_1_dsn1.sum(axis = 1, keepdims = True)
kl_dsn1= (t_prob_dsn1*T.log((t_prob_dsn1)/(p_prob_dsn1)))
loss_kl_dsn1 = kl_dsn1.sum()/y_shape
#KL DIV LOSS - DSN2between probability distribution of target and predicted histogram DSN2 to capture shape of histogram
target_hist_dsn2 = target_var_hist_dsn2[input_var_ex]
p_prob_1_dsn2 = prediction_hist_dsn2/(prediction_hist_dsn2.sum(axis = 1, keepdims=True) + 1e-6) + (1e-6)
p_prob_dsn2 = p_prob_1_dsn2/p_prob_1_dsn2.sum(axis=1, keepdims = True)
t_prob_1_dsn2 = target_hist_dsn2/target_hist_dsn2.sum(axis = 1, keepdims=True) + (1e-6)
t_prob_dsn2 = t_prob_1_dsn2/t_prob_1_dsn2.sum(axis = 1, keepdims = True)
kl_dsn2= (t_prob_dsn2*T.log((t_prob_dsn2)/(p_prob_dsn2)))
loss_kl_dsn2 = kl_dsn2.sum()/y_shape
# weighted L1 or L1 loss between predicted and target histogram to capture scale of size histogram
if loss_name == 'L1':
loss_l1 = (T.abs_(prediction_hist - target_hist)).sum()/y_shape
loss_l1_temp = (weight_var_hist*T.abs_(prediction_hist - target_hist)).sum()/y_shape
loss_l1_dsn1 = (T.abs_(prediction_hist_dsn1 - target_hist_dsn1)).sum()/y_shape
loss_l1_temp_dsn1 = (weight_var_hist_dsn1*T.abs_(prediction_hist_dsn1 - target_hist_dsn1)).sum()/y_shape
loss_l1_dsn2 = (T.abs_(prediction_hist_dsn2 - target_hist_dsn2)).sum()/y_shape
loss_l1_temp_dsn2 = (weight_var_hist_dsn2*T.abs_(prediction_hist_dsn2 - target_hist_dsn2)).sum()/y_shape
elif loss_name == 'w_L1':
loss_l1 = (weight_var_hist*T.abs_(prediction_hist - target_hist)).sum()/y_shape
loss_l1_temp = (T.abs_(prediction_hist - target_hist)).sum()/y_shape
loss_l1_dsn1 = (weight_var_hist_dsn1*T.abs_(prediction_hist_dsn1 - target_hist_dsn1)).sum()/y_shape
loss_l1_temp_dsn1 = (T.abs_(prediction_hist_dsn1 - target_hist_dsn1)).sum()/y_shape
loss_l1_dsn2 = (weight_var_hist_dsn2*T.abs_(prediction_hist_dsn2 - target_hist_dsn2)).sum()/y_shape
loss_l1_temp_dsn2 = (T.abs_(prediction_hist_dsn2 - target_hist_dsn2)).sum()/y_shape
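    # Only loss_l1 and its dsn counterparts enter the training objective below; the
    # *_temp variants hold the complementary weighted/unweighted value and are returned
    # in loss_list purely for monitoring.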
# Regularization loss
loss_reg = 0.5*reg*lasagne.regularization.regularize_network_params([net_count, net_hist, net_hist_dsn1, net_hist_dsn2], lasagne.regularization.l2)
# Total Loss
loss_total = loss_pix + Loss_wt[0]*(loss_kl + loss_kl_dsn1 + loss_kl_dsn2) + Loss_wt[1]*(loss_l1 + loss_l1_dsn1 + loss_l1_dsn2) + loss_reg
loss_list = [loss_total, loss_count, loss_pix, loss_kl, loss_kl_dsn1, loss_kl_dsn2, loss_l1, loss_l1_dsn1, loss_l1_dsn2,\
loss_l1_temp, loss_l1_temp_dsn1, loss_l1_temp_dsn2, loss_reg]
placeholder_list = [input_var, input_var_ex, target_var, target_var_hist, target_var_hist_dsn1, target_var_hist_dsn2, \
weight_var_hist, weight_var_hist_dsn1, weight_var_hist_dsn2]
return loss_list, placeholder_list, train_op, test_op
def plot_results(train_loss, val_loss, Log_dir, plotname, i):
"""
    Function: Plot training and validation loss curves
Input:
train_loss : List of train loss per epoch
val_loss: List of val loss per epoch
Log_dir: Logging directory for saving training plots
plotname: name of the loss plot
        i: integer offset used to give each plot its own matplotlib figure
"""
if not os.path.exists(Log_dir):
os.mkdir(Log_dir)
# Saving the train and val loss as .txt file
Error_file = open(Log_dir + '/' + plotname + '_Summary.txt', "w")
Error_file.write('Train_loss: ' + str(train_loss))
Error_file.write('\n val_loss: '+ str(val_loss))
Error_file.close()
# Plotting training curves
plt.figure(i+10, figsize=(15, 10))
plt.plot(range(len(train_loss)), train_loss, 'r', label = 'train')
plt.plot(range(len(val_loss)), val_loss, 'g', label = 'val')
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend()
plt.title('Train & Validation ' + plotname)
plt.savefig(Log_dir + '/' + plotname + '.png')
plt.close()
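# Example call (hypothetical values): plot_results([1.2, 0.9, 0.7], [1.3, 1.0, 0.9],
# './logs', 'Total_Loss', 0) writes ./logs/Total_Loss.png and ./logs/Total_Loss_Summary.txt.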
def visualize_HistoNet_result(path, i, image, gt_countmap, gt_count, gt_hist, pred_countmap, pred_hist, Hist_wt, data_mean, num_bins):
"""
Function: Visualizing results of HistoNet network
Input:
path : path of the result folder
        i : image number
        image: input image (channels-first), normalised by the dataset mean
        gt_countmap: Ground truth redundant count map
gt_count: Ground truth object count
gt_hist: Ground truth object size distribution histogram
pred_countmap: predicted redundant count map
pred_hist: predicted object size distribution histogram
Hist_wt: Weights for L1 loss between predicted and target size histogram
data_mean: Mean image of training dataset
num_bins: number of the bins of histogram
"""
fig = plt.Figure(figsize=(18, 9), dpi=160)
gcf = plt.gcf()
gcf.set_size_inches(18, 15)
fig.set_canvas(gcf.canvas)
    pred_count = [(l.sum()/(ef)).astype(int) for l in pred_countmap]
img = image.transpose((1,2,0))
#De-Normalize
img = img + data_mean
ax2 = plt.subplot2grid((4,6), (0, 0), colspan=2)
ax3 = plt.subplot2grid((4,6), (0, 2), colspan=5)
ax4 = plt.subplot2grid((4,6), (1, 2), colspan=5)
ax5 = plt.subplot2grid((4,6), (1, 0), rowspan=1)
ax6 = plt.subplot2grid((4,6), (1, 1), rowspan=1)
ax7 = plt.subplot2grid((4,6), (2, 0), colspan=5)
ax8 = plt.subplot2grid((4,6), (3, 0), colspan=5)
ax2.set_title("Input Image")
ax2.imshow(img, interpolation='none', cmap='Greys_r')
ax3.set_title("Target Countmap")
ax3.imshow(np.concatenate((gt_countmap),axis=1), interpolation='none')
ax4.set_title("Predicted Countmap")
ax4.imshow(pred_countmap.squeeze(), interpolation='none')
ax5.set_title("Target " + str(gt_count))
ax5.set_ylim((0, np.max(gt_count)*2))
ax5.set_xticks(np.arange(0, 1, 1.0))
ax5.bar(range(1),gt_count, align='center')
ax6.set_title("Pred " + str(pred_count))
ax6.set_ylim((0, np.max(gt_count)*2))
ax6.set_xticks(np.arange(0, 1, 1.0))
ax6.bar(range(1),pred_count, align='center')
Bins = np.linspace(0,200, num_bins + 1)
ax7.set_title("Gt Histogram")
ax7.hist(Bins[:-1], weights= gt_hist.T , bins = Bins)
ax8.set_title("Pred Histogram")
ax8.hist(Bins[:-1], weights= pred_hist.T , bins = Bins)
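    # plt.hist is used here purely as a renderer: the left bin edges are passed as data
    # and the pre-computed bin counts as `weights`, so the stored gt/pred histograms are
    # drawn directly instead of re-binning raw object sizes.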
if not os.path.exists(path + '/HistoNet_eval_result'):
os.mkdir(path + '/HistoNet_eval_result')
fig.savefig(path + '/HistoNet_eval_result/image-' + str(i) + '.png', bbox_inches='tight', pad_inches=0)
def visualize_HistoNet_DSN_result(path, i, image, gt_countmap, gt_count, gt_hist, gt_hist_dsn1, gt_hist_dsn2, pred_countmap, pred_hist, pred_hist_dsn1, pred_hist_dsn2,\
Hist_wt, Hist_wt_dsn1, Hist_wt_dsn2, data_mean, num_bins):
"""
    Function: Visualizing results of the HistoNet-DSN network
Input:
path : path of the result folder
        i : image number
        image: input image (channels-first), normalised by the dataset mean
        gt_countmap: Ground truth redundant count map
        gt_count: Ground truth object count
        gt_hist: Ground truth object size distribution histogram (final output)
        gt_hist_dsn1: Ground truth object size distribution histogram dsn1
        gt_hist_dsn2: Ground truth object size distribution histogram dsn2
pred_countmap: predicted redundant count map
pred_hist: predicted object size distribution histogram
pred_hist_dsn1: predicted object size distribution histogram dsn1
pred_hist_dsn2: predicted object size distribution histogram dsn2
Hist_wt: Weights for L1 loss between predicted and target size histogram
Hist_wt_dsn1: Weights for L1 loss between predicted and target size histogram dsn1
Hist_wt_dsn2: Weights for L1 loss between predicted and target size histogram dsn2
data_mean: Mean image of training dataset
num_bins: list of number of bins of histogram [early layer output hist size, middle layer output hist size, final output hist size]
"""
fig = plt.Figure(figsize=(18, 9), dpi=160)
gcf = plt.gcf()
gcf.set_size_inches(18, 15)
fig.set_canvas(gcf.canvas)
    pred_count = [(l.sum()/(ef)).astype(int) for l in pred_countmap]
img = image.transpose((1,2,0))
#De-Normalize
img = img + data_mean
ax2 = plt.subplot2grid((6,6), (0, 0), colspan=2)
ax3 = plt.subplot2grid((6,6), (0, 2), colspan=5)
ax4 = plt.subplot2grid((6,6), (1, 2), colspan=5)
ax5 = plt.subplot2grid((6,6), (1, 0), rowspan=1)
ax6 = plt.subplot2grid((6,6), (1, 1), rowspan=1)
ax7 = plt.subplot2grid((6,6), (2, 0), colspan=5)
ax8 = plt.subplot2grid((6,6), (3, 0), colspan=5)
ax9 = plt.subplot2grid((6,6), (4, 0), colspan=3)
ax10 = plt.subplot2grid((6,6), (4, 3), colspan=5)
ax11 = plt.subplot2grid((6,6), (5, 0), colspan=3)
ax12 = plt.subplot2grid((6,6), (5, 3), colspan=5)
ax2.set_title("Input Image")
ax2.imshow(img, interpolation='none', cmap='Greys_r')
ax3.set_title("Target Countmap")
ax3.imshow(np.concatenate((gt_countmap),axis=1), interpolation='none')
ax4.set_title("Predicted Countmap")
ax4.imshow(pred_countmap.squeeze(), interpolation='none')
ax5.set_title("Target " + str(gt_count))
ax5.set_ylim((0, np.max(gt_count)*2))
ax5.set_xticks(np.arange(0, 1, 1.0))
ax5.bar(range(1),gt_count, align='center')
ax6.set_title("Pred " + str(pred_count))
ax6.set_ylim((0, np.max(gt_count)*2))
ax6.set_xticks(np.arange(0, 1, 1.0))
ax6.bar(range(1),pred_count, align='center')
Bins = np.linspace(0,200, num_bins[2] + 1)
ax7.set_title("Gt Histogram")
ax7.hist(Bins[:-1], weights= gt_hist.T , bins = Bins)
ax8.set_title("Pred Histogram")
ax8.hist(Bins[:-1], weights= pred_hist.T , bins = Bins)
Bins2 = np.linspace(0,200, num_bins[0] + 1)
ax9.set_title("Gt Histogram dsn1")
ax9.hist(Bins2[:-1], weights= gt_hist_dsn1.T , bins = Bins2)
ax11.set_title("Pred Histogram dsn1")
ax11.hist(Bins2[:-1], weights= pred_hist_dsn1.T , bins = Bins2)
Bins4 = np.linspace(0,200,num_bins[1] + 1)
ax10.set_title("Gt Histogram dsn2")
ax10.hist(Bins4[:-1], weights= gt_hist_dsn2.T , bins = Bins4)
ax12.set_title("Pred Histogram dsn2")
ax12.hist(Bins4[:-1], weights= pred_hist_dsn2.T , bins = Bins4)
if not os.path.exists(path + '/HistoNet_DSN_eval_result'):
os.mkdir(path + '/HistoNet_DSN_eval_result')
fig.savefig(path + '/HistoNet_DSN_eval_result/image-' + str(i) + '.png', bbox_inches='tight', pad_inches=0)
|
UTF-8
|
Python
| false | false | 50,094 |
py
| 7 |
train.py
| 5 | 0.647682 | 0.620533 | 0 | 905 | 54.352486 | 259 |
felipeumanzor/chilean_parliament_data
| 18,004,502,937,562 |
3504c97d3d645fefe12a4423ef41eab5d0ee8442
|
36ab20069d9da6168df6e703c8fa35d19bcb6292
|
/votoparlamentario/urls.py
|
a98f87c8d4387af91c3e17fba87cdb263e0268fe
|
[] |
no_license
|
https://github.com/felipeumanzor/chilean_parliament_data
|
86570f3bbedaa9968dc7c7e83b801c8c5fb69d68
|
4670db93f03eee9b71e3378b09ce9a200e9a4364
|
refs/heads/master
| 2020-08-27T12:02:08.428061 | 2019-11-12T05:53:08 | 2019-11-12T05:53:08 | 217,361,374 | 0 | 0 | null | false | 2020-06-06T00:28:52 | 2019-10-24T17:57:17 | 2019-11-12T05:53:14 | 2020-06-06T00:28:50 | 47 | 0 | 0 | 1 |
Python
| false | false |
"""votoparlamentario URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
    https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
    1. Add an import: from my_app import views
    2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
    1. Add an import: from other_app.views import Home
    2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
    1. Import the include() function: from django.urls import include, path
    2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from api.views import (
    get_diputados,
    get_proyectos,
    get_proyectos_by_author,
    get_votos_by_diputado,
    get_gastos_by_diputado,
    get_diputado_by_id,
    get_proyecto_by_id,
)
urlpatterns = [
path("admin/", admin.site.urls),
path("diputados/", get_diputados),
path("diputados/<diputado_id>/", get_diputado_by_id),
path("diputados/<diputado_id>/votos/", get_votos_by_diputado),
path("diputados/<diputado_id>/gastos/", get_gastos_by_diputado),
path("proyectos/", get_proyectos),
path("proyectos/by_author/<author_id>/", get_proyectos_by_author),
path("proyectos/<proyecto_id>/", get_proyecto_by_id),
]
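# Illustrative request routing (IDs are hypothetical):
#   GET /diputados/12/votos/    -> get_votos_by_diputado(request, diputado_id='12')
#   GET /proyectos/by_author/7/ -> get_proyectos_by_author(request, author_id='7')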
|
UTF-8
|
Python
| false | false | 1,357 |
py
| 7 |
urls.py
| 6 | 0.689757 | 0.683861 | 0 | 38 | 34.710526 | 77 |
Steven-Ireland/ProfessorWho-Scraper
| 3,599,182,619,290 |
3e81adf3653ab347045460be984c85f818a42a6c
|
8425fb50a7f22d458875dd7154f4e7f8af8d2106
|
/parse.py
|
fb3e8568e33c7c489d5844849db1e8c912bee2ca
|
[] |
no_license
|
https://github.com/Steven-Ireland/ProfessorWho-Scraper
|
2814cde9ab9a21326cea253ac6daa307aaf37afb
|
39299f495cce0ea50bbea2349a9109c7ff1b4f48
|
refs/heads/master
| 2016-08-07T20:23:44.085160 | 2015-02-04T02:09:21 | 2015-02-04T02:09:21 | 29,502,390 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import re
class Parser: # works for 1-level html structures like table rows, but nesting breaks
def __init__(self, dom, attr="html"):
self.str = dom
self.attr= attr
def find(self, q):
tag = "<"+q+".*?>.*?</"+q+">" # <q.*?>.*?</q>
ret = []
for found in re.findall(tag, self.str, re.DOTALL|re.IGNORECASE):
ret.append(Parser(found, q))
return ret
def text(self):
locator = "<"+self.attr+".*?>(.*?)</"+self.attr+">"
        innerHTML = re.search(locator, self.str, re.DOTALL|re.IGNORECASE).group(0) # full match <me>innerHTML</me>; the tags are stripped by the loop below
#Go through and find all plaintext inside element, i.e
#<1> <2 class="BORDER">Hello World</2> !</1>
#should return "Hello World!"
plaintext = ""
unclosedABs = 0 #Angle Brackets
for c in innerHTML:
if (c == '<'):
unclosedABs+=1
continue #Move on
elif (c == '>'):
unclosedABs-=1
continue #Move on
if (unclosedABs == 0):
plaintext+=c
return plaintext
def prop(self, attribute):
myStartTag = re.search("<"+self.attr+".*?>", self.str, re.IGNORECASE).group(0)
locator = attribute+"=\"(.*?)\"" # <me attribute="(/a/b/c)">
return re.search(locator, myStartTag, re.IGNORECASE).group(1)
def html(self):
return self.str
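# Example (illustrative only):
#   p = Parser('<tr><td>A</td><td href="x">B</td></tr>')
#   cells = p.find('td')     # two Parser objects, one per <td>
#   cells[0].text()          # -> 'A'
#   cells[1].prop('href')    # -> 'x'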
|
UTF-8
|
Python
| false | false | 1,203 |
py
| 6 |
parse.py
| 6 | 0.612635 | 0.60266 | 0 | 43 | 27 | 99 |
faridrashidi/trisicell
| 15,814,069,613,706 |
58bf0bff9b86d4fac731ba03505c9a1e3b529fb7
|
5e84c48158341afa34eae19a2e0db26b11d8a267
|
/trisicell/pp/__init__.py
|
5009d08d80e6a62d04e3c24c033248bc973eb7b2
|
[
"BSD-3-Clause"
] |
permissive
|
https://github.com/faridrashidi/trisicell
|
53fbd97efd4121f717babd218661edbce48cb10e
|
eff50c7885053250656a60040612e8097c5032a1
|
refs/heads/master
| 2023-07-24T03:54:25.472928 | 2023-07-21T06:31:18 | 2023-07-21T06:31:18 | 342,698,508 | 6 | 2 |
BSD-3-Clause
| false | 2023-07-21T14:42:58 | 2021-02-26T20:54:06 | 2023-02-23T20:10:13 | 2023-07-21T14:42:57 | 162,366 | 8 | 2 | 0 |
Python
| false | false |
"""Pre/Post-Processing Module."""
from trisicell.pp._bifiltering import bifiltering
from trisicell.pp._binary import (
binarym_filter_clonal_mutations,
binarym_filter_nonsense_mutations,
binarym_filter_private_mutations,
binarym_statistics,
consensus_combine,
)
from trisicell.pp._readcount import (
build_scmatrix,
filter_mut_mutant_must_present_in_at_least,
filter_mut_reference_must_present_in_at_least,
filter_mut_vaf_greater_than_coverage_mutant_greater_than,
filter_snpeff,
group_obs_apply_func,
keep_cell_by_list,
keep_mut_by_list,
local_cluster_cells_then_merge_muts_pseudo_bulk,
remove_cell_by_list,
remove_mut_by_list,
statistics,
)
from trisicell.pp._tree import collapse
__all__ = (
    "bifiltering",
    "binarym_filter_clonal_mutations",
    "binarym_filter_nonsense_mutations",
    "binarym_filter_private_mutations",
    "binarym_statistics",
    "consensus_combine",
    "build_scmatrix",
    "filter_mut_mutant_must_present_in_at_least",
    "filter_mut_reference_must_present_in_at_least",
    "filter_mut_vaf_greater_than_coverage_mutant_greater_than",
    "filter_snpeff",
    "keep_cell_by_list",
    "keep_mut_by_list",
    "remove_cell_by_list",
    "remove_mut_by_list",
    "statistics",
    "collapse",
    "group_obs_apply_func",
    "local_cluster_cells_then_merge_muts_pseudo_bulk",
)
|
UTF-8
|
Python
| false | false | 1,348 |
py
| 220 |
__init__.py
| 180 | 0.712166 | 0.712166 | 0 | 47 | 27.680851 | 61 |
leeil525/Python
| 3,341,484,604,058 |
5fe01d930661e24d8f856082989ba1534e0c6586
|
3bfc8c92a9aae68a7bd0aaf9fef7e208d0312cb1
|
/selenium.py
|
2cde1f255e67891265eec05ed7cf9fa0c75b396e
|
[] |
no_license
|
https://github.com/leeil525/Python
|
7abcfecdc86a55939affc0754c59f747bf3a8973
|
cee85158428cc4922d2a4b9f14d56fada2e726a0
|
refs/heads/master
| 2021-01-20T09:06:51.671598 | 2021-01-20T06:30:31 | 2021-01-20T06:30:31 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from selenium import webdriver
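# NOTE: a script saved as selenium.py shadows the installed selenium package on
# sys.path, which can make these imports fail; renaming the file avoids the
# accidental self-import.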
from selenium.webdriver.common.keys import Keys # keyboard key actions
from selenium.webdriver.support.ui import Select # <select> drop-down operations
from selenium.webdriver import ActionChains # drag and drop elements
from selenium.webdriver.chrome.options import Options # configure Chrome mobile emulation
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities # configure PhantomJS mobile emulation
#browser = webdriver.PhantomJS(executable_path='D:\\phantomjs-2.1.1-windows\\bin\\phantomjs.exe') # PhantomJS usage
#browser=webdriver.Chrome()
'''PhantomJS mobile emulation with a screenshot'''
# dcap = dict(DesiredCapabilities.PHANTOMJS)
# dcap["phantomjs.page.settings.userAgent"] = (
# "Mozilla/5.0 (Linux; Android 5.1.1; Nexus 6 Build/LYZ28E) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.23 Mobile Safari/537.36"
# )
# driver = webdriver.PhantomJS(desired_capabilities=dcap)
# driver.get("https://bilibili.com")
# driver.get_screenshot_as_file('02.png')
# driver.quit()
'''Chrome mobile emulation'''
# mobile_emulation = {"deviceName":"Apple iPhone 6"}
# chrome_options = Options()
# chrome_options.add_experimental_option("mobileEmulation", mobile_emulation)
# browser = webdriver.Chrome(chrome_options=chrome_options) # browse in mobile emulation mode
# browser.get("http://www.bilibili.com") # open bilibili
'''Finding elements'''
#element = browser.find_element_by_id("q") # find by ID
#element = browser.find_element_by_css_selector("input") # find by CSS selector
#element = browser.find_element_by_name("passwd") # find by name attribute, e.g. name='passwd'
#element = browser.find_elements_by_tag_name("input") # find by tag name <input></input>
#element = browser.find_element_by_xpath('//*[@id="banner_link"]/div/div/form/input') # find by XPath (the search input box)
# the lines above return a single element; use the plural find_elements_... form to get all matches
'''Filling forms, key presses and clicks'''
#element.send_keys("test") # type "test"
#element.clear() # clear the previous text
#element.send_keys("test2") # type "test2"
#element.send_keys(Keys.RETURN) # simulate pressing Enter
#element.click() # mouse click
'''Select (drop-down) operations'''
#select = Select(driver.find_element_by_name('name'))
#select.select_by_index(index) # select by index
#select.select_by_visible_text("text") # select by visible text
#select.select_by_value(value) # select by value
#select.deselect_all() # deselect all options
'''Dragging elements'''
#element2 = browser.find_element_by_xpath('//*[@id="banner_link"]/div/div/form/input')
#element1 = browser.find_element_by_xpath('//*[@id="banner_link"]/div/div/a')
#action_chains = ActionChains(browser)
#action_chains.drag_and_drop(element2, element1).perform() # drag element2 onto element1
#ActionChains(browser).drag_and_drop_by_offset(element2,10,10).perform() # drag element2 by an offset of (10, 10), i.e. down and to the right
#browser.close() # close the browser
|
UTF-8
|
Python
| false | false | 2,938 |
py
| 30 |
selenium.py
| 24 | 0.715066 | 0.69555 | 0 | 64 | 38.75 | 140 |
sirius9/Project
| 6,124,623,389,661 |
1fa5fb75701bcae39f54eb46e8754039bfac04dc
|
574e4f93ebc4b557f82599c5ce9558c0391ea5ba
|
/03_Premierleague/function.py
|
738f34776f181b9fde6eeabceeee52f2f7caef4d
|
[] |
no_license
|
https://github.com/sirius9/Project
|
950696144748cba58bfc69bc8febbf0a0defcaff
|
7f4de79ee722284396b7950b4d1e956c998bb7d9
|
refs/heads/master
| 2020-04-09T04:33:37.163893 | 2018-08-27T01:20:57 | 2018-08-27T01:20:57 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# 0. basic
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
# 1. EDA
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.pyplot import *
# Statistic
import scipy.stats as stats
import urllib
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm
import matplotlib.pyplot as plt
# 2. Feature Select
from sklearn.preprocessing import LabelEncoder
def category_to_ohe(train_col, test_col):
'''
    A function that label-encodes categorical data consistently across the Train / Test sets.
'''
le = LabelEncoder()
le.fit(train_col)
labeled_train_col = le.transform(train_col)
labeled_train_col = labeled_train_col.reshape(len(labeled_train_col),1)
labeled_test_col = le.transform(test_col)
labeled_test_col = labeled_test_col.reshape(len(labeled_test_col),1)
return labeled_train_col, labeled_test_col
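# Example (hypothetical data): with train_col = ['a', 'b', 'a'] and test_col = ['b'],
# the shared LabelEncoder maps a -> 0 and b -> 1, so the function returns the column
# vectors [[0], [1], [0]] and [[1]].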
# 3. Modeling
from statsmodels.stats.outliers_influence import variance_inflation_factor
# 3.1 Generative models based on conditional probability
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis # LDA
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis # QDA
from sklearn.naive_bayes import * # Naive Bayesian
# 3.2 Discriminative models based on conditional probability
from sklearn.tree import DecisionTreeClassifier
# 3.3 Model combining (Ensemble)
from sklearn.ensemble import VotingClassifier # voting
from sklearn.ensemble import BaggingClassifier # bagging
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier # random Forest
from sklearn.ensemble import AdaBoostClassifier # AdaBoost
from sklearn.ensemble import GradientBoostingClassifier # GradientBoost
import xgboost # xgboost
# 3.4 Discriminant function models
from sklearn.linear_model import Perceptron # perceptron
from sklearn.linear_model import SGDClassifier # SGD
from sklearn.svm import SVC # support vector machine
# 4. Optimizer
from sklearn.model_selection import validation_curve # validation curve
from sklearn.model_selection import GridSearchCV # gridseach
from sklearn.model_selection import ParameterGrid # ParameterGrid
# 5. Evaluation
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.metrics import * # make confusion matrix
from sklearn.preprocessing import label_binarize # ROC curve
from sklearn.metrics import auc # AUC
from sklearn.metrics import roc_curve
|
UTF-8
|
Python
| false | false | 2,505 |
py
| 40 |
function.py
| 4 | 0.794893 | 0.788196 | 0 | 81 | 28.493827 | 89 |
geedubey/soware
| 13,297,218,788,514 |
678ddc56c064f14b6bbbefc1f985f9f9cc6eee63
|
ece9ae460f93b877313c930af7d2e36219cd0942
|
/link.py
|
e0e02b78adefa0aabb8aa5e4886bffd168e5823a
|
[] |
no_license
|
https://github.com/geedubey/soware
|
c49ae6c740e8c0da3879856236eeb23d91c997d4
|
138a204f10cfc6f0926a6773a5540c7943ff9adf
|
refs/heads/master
| 2020-03-23T09:29:40.880732 | 2018-09-09T17:57:44 | 2018-09-09T17:57:44 | 141,391,014 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Credentials you get from registering a new application
client_id = '77xdxq5i655wrv'
client_secret = 'Ww37UQ9kcZ9Q9RVD'
# OAuth endpoints given in the LinkedIn API documentation
authorization_base_url = 'https://www.linkedin.com/uas/oauth2/authorization'
token_url = 'https://www.linkedin.com/uas/oauth2/accessToken'
from requests_oauthlib import OAuth2Session
from requests_oauthlib.compliance_fixes import linkedin_compliance_fix
import json
linkedin = OAuth2Session(client_id, redirect_uri='https://localhost:8000/')
linkedin = linkedin_compliance_fix(linkedin)
# Redirect user to LinkedIn for authorization
authorization_url, state = linkedin.authorization_url(authorization_base_url)
print ('Please go here and authorize,', authorization_url)
# Get the authorization verifier code from the callback url
redirect_response = input('Paste the full redirect URL here:')
# Fetch the access token
linkedin.fetch_token(token_url, client_secret=client_secret, authorization_response=redirect_response)
# Fetch a protected resource, i.e. user profile
r = linkedin.get('https://api.linkedin.com/v1/people/~')
#r = linkedin.get("https://api.linkedin.com/v2/me")
print (r.content)
#sharepost = { "comment": "Check out developer.linkedin.com! http://linkd.in/1FC2PyG", "visibility": {"code": "anyone"}}
#p = linkedin.post("https://api.linkedin.com/v1/people/~/shares?format=json", json=sharepost)
#print (p.content)
comment = {
"actor": "urn:li:person:t3gGTvjaJQ",
"message": {
"attributes": [],
"text": "This is a comment on POST developer.linkedin.com"
}
}
cp = linkedin.post("https://api.linkedin.com/v2/socialActions/urn:li:share:6443893670432243712/comments", json=comment)
print (cp.content)
#urn:li:share:6443893670432243712
|
UTF-8
|
Python
| false | false | 1,761 |
py
| 6 |
link.py
| 5 | 0.755253 | 0.71891 | 0 | 42 | 40.952381 | 122 |
shantanu609/PreCourse_1
| 3,839,700,768,747 |
98919ef1f3ef37bfe8811dce5734a53a0ddf0783
|
ce3418afa332e88827068bef0b996b820b144cba
|
/Exercise_3.py
|
44ecfdaec6c167fdaedc2313104372c23ce34966
|
[] |
no_license
|
https://github.com/shantanu609/PreCourse_1
|
e7d32108b594d6ff0ff09d4cb9e6b5fb690e7af7
|
68a0929bc183aea6f4063ebb5faaf67cbbe24794
|
refs/heads/master
| 2022-07-17T09:32:53.746587 | 2020-05-20T01:36:46 | 2020-05-20T01:36:46 | 265,414,617 | 0 | 0 | null | true | 2020-05-20T01:30:13 | 2020-05-20T01:30:13 | 2019-12-16T05:35:35 | 2020-05-20T01:14:40 | 20 | 0 | 0 | 0 | null | false | false |
class ListNode:
"""
A node in a singly-linked list.
"""
def __init__(self, data=None, next=None):
self.data = data
        self.next = next
class SinglyLinkedList:
def __init__(self):
"""
Create a new singly-linked list.
Takes O(1) time.
"""
self.head = ListNode(0)
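        # sentinel (dummy) head node; the actual data starts at self.head.next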
# O(N) time | O(1) Space
def append(self, data):
"""
Insert a new element at the end of the list.
Takes O(n) time.
"""
node = self.head
while node.next is not None:
node = node.next
node.next = ListNode(data)
def find(self, key):
"""
Search for the first element with `data` matching
`key`. Return the element or `None` if not found.
Takes O(n) time.
"""
node = self.head.next
while node:
if node.data == key:
return True
node = node.next
return False
# O(N) time | O(1) Space
def remove(self, key):
"""
Remove the first occurrence of `key` in the list.
Takes O(n) time.
"""
node = self.head.next
prev = self.head
while node:
if node.data == key:
prev.next = node.next
break
node = node.next
prev = prev.next
if __name__ == '__main__':
sl = SinglyLinkedList()
sl.append(1)
sl.append(2)
sl.append(3)
res = sl.find(2)
print(res)
sl.remove(2)
print(sl.find(2))
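    # Expected output: True, then False (the value 2 has been removed)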
|
UTF-8
|
Python
| false | false | 1,545 |
py
| 1 |
Exercise_3.py
| 1 | 0.487379 | 0.480906 | 0 | 65 | 22.784615 | 57 |
Mepond/bitcoin-abc
| 6,914,897,364,399 |
97f6ecca5fc66149ff312c7b4d5af33dbf5a3520
|
781320936dfd058d70873d3e05a7780829267f49
|
/test/functional/abc_p2p_avalanche_quorum.py
|
eba9b7c46a7a4928ed8211b89360720f3b55d00e
|
[
"MIT"
] |
permissive
|
https://github.com/Mepond/bitcoin-abc
|
38ac072f8e7832bc1e2bd6e395251e97d1beb4a1
|
47f7a91b4c8911fd1cd05102de1f7e622e8855d8
|
refs/heads/master
| 2022-03-27T17:56:18.927737 | 2022-03-24T03:11:48 | 2022-03-24T03:12:09 | 227,140,014 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2020-2022 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the quorum detection of avalanche."""
from time import time
from test_framework.avatools import (
create_coinbase_stakes,
get_ava_p2p_interface,
)
from test_framework.key import ECKey, ECPubKey
from test_framework.messages import AvalancheVote, AvalancheVoteError
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
from test_framework.wallet_util import bytes_to_wif
class AvalancheQuorumTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [
['-enableavalanche=1',
'-avacooldown=0',
'-avatimeout=0',
'-avaminquorumstake=100000000',
'-avaminquorumconnectedstakeratio=0.8']
]
def mock_forward(self, delta):
self.mock_time += delta
self.nodes[0].setmocktime(self.mock_time)
def run_test(self):
self.mock_time = int(time())
self.mock_forward(0)
# Create a local node to poll from and a helper to send polls from it
# and assert on the response
node = self.nodes[0]
poll_node = get_ava_p2p_interface(node)
poll_node_pubkey = ECPubKey()
poll_node_pubkey.set(bytes.fromhex(node.getavalanchekey()))
def poll_and_assert_response(expected):
# Send poll for best block
block = int(node.getbestblockhash(), 16)
poll_node.send_poll([block])
# Get response and check that the vote is what we expect
response = poll_node.wait_for_avaresponse()
r = response.response
assert poll_node_pubkey.verify_schnorr(response.sig, r.get_hash())
assert_equal(len(r.votes), 1)
actual = repr(r.votes[0])
expected = repr(AvalancheVote(expected, block))
assert_equal(actual, expected)
# Create peers to poll
num_quorum_peers = 2
coinbase_key = node.get_deterministic_priv_key().key
blocks = node.generate(num_quorum_peers)
peers = []
for i in range(0, num_quorum_peers):
keyHex = "12b004fff7f4b69ef8650e767f18f11ede158148b425660723b9f9a66e61f75" + \
str(i)
k = ECKey()
k.set(bytes.fromhex(keyHex), True)
stakes = create_coinbase_stakes(
node, [blocks[i]], coinbase_key)
proof = node.buildavalancheproof(1, 1, bytes_to_wif(k.get_bytes()),
stakes)
peers.append({'key': k, 'proof': proof, 'stake': stakes})
def addavalanchenode(peer):
pubkey = peer['key'].get_pubkey().get_bytes().hex()
assert node.addavalanchenode(
peer['node'].nodeid, pubkey, peer['proof']) is True
# Start polling. The response should be UNKNOWN because there's no
# score
poll_and_assert_response(AvalancheVoteError.UNKNOWN)
# Create one peer with half the score and add one node
peers[0]['node'] = get_ava_p2p_interface(node)
addavalanchenode(peers[0])
poll_and_assert_response(AvalancheVoteError.UNKNOWN)
# Create a second peer with the other half and add one node
peers[1]['node'] = get_ava_p2p_interface(node)
addavalanchenode(peers[1])
poll_and_assert_response(AvalancheVoteError.ACCEPTED)
# Disconnect peer 1's node which drops us below the threshold, but we've
# latched that the quorum is established
self.mock_forward(1)
peers[1]['node'].peer_disconnect()
peers[1]['node'].wait_for_disconnect()
poll_and_assert_response(AvalancheVoteError.ACCEPTED)
# Reconnect node and re-establish quorum
peers[1]['node'] = get_ava_p2p_interface(node)
addavalanchenode(peers[1])
poll_and_assert_response(AvalancheVoteError.ACCEPTED)
if __name__ == '__main__':
AvalancheQuorumTest().main()
|
UTF-8
|
Python
| false | false | 4,236 |
py
| 88 |
abc_p2p_avalanche_quorum.py
| 73 | 0.626534 | 0.60458 | 0 | 111 | 37.162162 | 90 |
kitoku-magic/user_registration_form
| 7,524,782,746,933 |
3116871426f215153eed6efe827f350eaf3ac484
|
b79b540921ba050ee1c12456890414a80c14e639
|
/python/src/controller/user_registration/user_registration_common_controller.py
|
4be97be970f6d711da18e92541b02763099255d7
|
[] |
no_license
|
https://github.com/kitoku-magic/user_registration_form
|
13758138b4066eddda324693164702f609d1ff60
|
f530cb9147c197635c3f057421ab26d579b29da0
|
refs/heads/master
| 2023-08-08T15:16:19.802530 | 2021-09-26T05:14:22 | 2021-09-26T05:14:22 | 173,554,653 | 0 | 0 | null | false | 2023-07-25T20:22:37 | 2019-03-03T09:26:29 | 2021-09-26T05:14:35 | 2023-07-25T20:22:34 | 2,642 | 0 | 0 | 5 |
Python
| false | false |
from python_library.src.custom_flask.controller import controller
from src import abc
from src import os
from src.model.entity.birth_days_entity import birth_days_entity
from src.model.entity.contact_methods_entity import contact_methods_entity
from src.model.entity.jobs_entity import jobs_entity
from src.model.entity.knew_triggers_entity import knew_triggers_entity
from src.model.entity.prefectures_entity import prefectures_entity
from src.model.entity.sexes_entity import sexes_entity
from src.model.entity.users_entity import users_entity
from src.model.repository.birth_days_repository import birth_days_repository
from src.model.repository.contact_methods_repository import contact_methods_repository
from src.model.repository.jobs_repository import jobs_repository
from src.model.repository.knew_triggers_repository import knew_triggers_repository
from src.model.repository.prefectures_repository import prefectures_repository
from src.model.repository.sexes_repository import sexes_repository
class user_registration_common_controller(controller):
"""
    A class that defines the processing shared by the user registration flow.
"""
__abstract__ = True
def __init__(self):
super().__init__()
request = self.get_request()
        # For ease of use, set all of the form data before the main processing starts
if 'GET' == request.environ['REQUEST_METHOD']:
request_data = request.args
elif 'POST' == request.environ['REQUEST_METHOD']:
request_data = request.form
self.__users_entity = users_entity()
self.__users_entity.set_request_to_entity(request_data)
@abc.abstractmethod
def execute(self):
pass
def get_users_entity(self):
return self.__users_entity
def assign_all_form_data(self):
"""
        Assign a value to every form field.
"""
properties = self.__users_entity.get_all_properties()
for field, value in properties.items():
if value is None:
value = ''
            # When displaying a zip code fetched from the DB, insert the hyphen
if 'zip_code' == field and '' != value and '-' not in value and self.__users_entity.zip_code_error is None:
value = value[0:3] + '-' + value[3:]
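                # e.g. a stored '1234567' is displayed as '123-4567'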
self.add_response_data(field, value)
def set_value_item(self):
"""
        Fetch and set the display contents for the selectable fields.
"""
value_items = [
            # Sex
{
'repository': sexes_repository(sexes_entity()),
'method': 'find_all',
'select': (sexes_entity.sex_id, sexes_entity.sex_name),
'where': '',
'params': {},
'order_by': 'sexes_sex_id ASC',
'template_param_name': 'sexes',
},
            # Prefecture
{
'repository': prefectures_repository(prefectures_entity()),
'method': 'find_all',
'select': (prefectures_entity.prefecture_id, prefectures_entity.prefecture_name),
'where': '',
'params': {},
'order_by': 'prefectures_prefecture_id ASC',
'template_param_name': 'prefectures',
},
            # Contact method
{
'repository': contact_methods_repository(contact_methods_entity()),
'method': 'find_all',
'select': (contact_methods_entity.contact_method_id, contact_methods_entity.contact_method_name),
'where': '',
'params': {},
'order_by': 'contact_methods_contact_method_id ASC',
'template_param_name': 'contact_methods',
},
            # How the user learned about the service
{
'repository': knew_triggers_repository(knew_triggers_entity()),
'method': 'find_all',
'select': (knew_triggers_entity.knew_trigger_id, knew_triggers_entity.knew_trigger_name),
'where': '',
'params': {},
'order_by': 'knew_triggers_knew_trigger_id ASC',
'template_param_name': 'knew_triggers',
},
]
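        # Each entry above is fetched with find_all and exposed to the template as a
        # list of {'id': ..., 'name': ...} dicts under its 'template_param_name'.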
for value_item in value_items:
result_data = getattr(value_item['repository'], value_item['method'])(
value_item['select'],
value_item['where'],
value_item['params'],
value_item['order_by']
)
response_data = []
for row in result_data:
response_dict = {'id': row[0], 'name': row[1]}
response_data.append(response_dict)
self.add_response_data(value_item['template_param_name'], response_data)
        # The items below are special cases
        # Birthday
birth_days_repository_obj = birth_days_repository(birth_days_entity())
birth_days_all_data = birth_days_repository_obj.find_all(
(birth_days_entity.birth_day_id, birth_days_entity.birth_day),
'',
{},
'birth_days_birth_day_id ASC'
)
birth_years = []
birth_months = []
birth_days = []
birth_day = None
for row in birth_days_all_data:
birth_day = row.birth_day
if birth_day.year not in birth_years:
birth_years.append(birth_day.year)
if birth_day.month not in birth_months:
birth_months.append(birth_day.month)
if birth_day.day not in birth_days:
birth_days.append(birth_day.day)
self.add_response_data('birth_years', birth_years)
self.add_response_data('birth_months', birth_months)
self.add_response_data('birth_days', birth_days)
        # Job (display "other" at the end of the list)
jobs_repository_obj = jobs_repository(jobs_entity())
jobs = jobs_repository_obj.find_all_order_by_job_other_last(
(jobs_entity.job_id, jobs_entity.job_name),
'',
{},
'jobs_job_id ASC'
)
self.add_response_data('jobs', jobs)
def select_value_item(self):
"""
        Set the selected state of the selectable fields.
"""
        # Single-selection fields
single_items = [
{
'name': 'birth_year',
'default_value': '',
},
{
'name': 'birth_month',
'default_value': '',
},
{
'name': 'birth_day',
'default_value': '',
},
{
'name': 'prefecture_id',
'default_value': '',
},
{
'name': 'sex_id',
'default_value': 1,
},
{
'name': 'job_id',
'default_value': 2,
},
{
'name': 'is_latest_news_hoped',
'default_value': 1,
},
{
'name': 'is_personal_information_provide_agreed',
'default_value': '',
},
]
for single_item in single_items:
form_value = getattr(self.__users_entity, single_item['name'], None)
if form_value is None:
value = single_item['default_value']
else:
value = form_value
self.add_response_data(single_item['name'], value)
        # Multiple-selection fields
multiple_items = [
{
'name': 'user_contact_methods_collection',
'field': 'contact_method_id',
'default_value': [],
},
{
'name': 'user_knew_triggers_collection',
'field': 'knew_trigger_id',
'default_value': [],
},
]
for multiple_item in multiple_items:
form_value = getattr(self.__users_entity, multiple_item['name'], None)
if form_value is None:
value = multiple_item['default_value']
else:
ids = []
for entity in form_value:
ids.append(int(getattr(entity, multiple_item['field'], None)))
value = ids
self.add_response_data(multiple_item['name'], value)
def remove_upload_file(self, users_entity_obj):
"""
        Delete the uploaded file.
"""
if True == isinstance(users_entity_obj.file_name, str) and os.path.isfile(users_entity_obj.file_path):
os.remove(users_entity_obj.file_path)
|
UTF-8
|
Python
| false | false | 8,804 |
py
| 101 |
user_registration_common_controller.py
| 87 | 0.524547 | 0.523594 | 0 | 215 | 38.032558 | 119 |
damoncheng/Markdown
| 18,150,531,823,079 |
b4680538c614b4ad40555f684bf6e6f2d16f7dbe
|
06682e4ce38bc10f215e0209e35442025748e381
|
/Interpreter/Python/lib/metaclass.py
|
be7bd778fce27aa05484bd6c681bb549477fee8e
|
[] |
no_license
|
https://github.com/damoncheng/Markdown
|
6a9983baaf21bd7d67a3846e42fc136679e2a7d8
|
ae7958aba91e21c4133222978e1028b91a2478cb
|
refs/heads/master
| 2023-03-24T14:45:42.000592 | 2022-04-15T16:31:41 | 2022-04-15T16:31:41 | 65,825,196 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import inspect
print("1......")
class UpperMetaClass(type):
def __new__(upperattr_metaclass, future_class_name, future_class_parents, future_class_attr, **kwargs):
print("upperattr_metaclass : ", upperattr_metaclass)
print("future_class_name : ", future_class_name)
print("future_class_parents : ", future_class_parents)
print("future_class_attr : ", future_class_attr)
upper_attr = {}
for name,value in future_class_attr.items():
if not name.startswith('__'):
upper_attr[name.upper()] = value
else:
upper_attr[name] = value
new_class = super(UpperMetaClass, upperattr_metaclass).__new__(upperattr_metaclass, future_class_name, future_class_parents, upper_attr, **kwargs)
new_class._prepare()
return new_class
def add_to_class(cls, name, value):
# We should call the contribute_to_class method only if it's bound
if not inspect.isclass(value) and hasattr(value, 'contribute_to_class'):
value.contribute_to_class(cls, name)
else:
setattr(cls, name, value)
def _prepare(cls):
print("metaclass _pepare")
cls.add_to_class("m", "123")
print("2......")
class ParentTest(metaclass=UpperMetaClass):
parent_name = "hello parent metaclass"
def __init__(self, *args, **kwargs):
self.parent_m = 456
print("3......")
class Test(ParentTest):
name = "hello metaclass"
def __init__(self, *args, **kwargs):
self.m = 123
super().__init__()
print("4......")
test = Test()
"""
print("test is created...")
print("test.parent_m : ", test.parent_m)
print("test.PARENT_NAME : ", test.PARENT_NAME)
"""
"""
test = Test()
test.n = 123
print(test.__dict__)
print(Test.__class__)
print(hasattr(test, "NAME"))
print(hasattr(test, "name"))
"""
|
UTF-8
|
Python
| false | false | 1,888 |
py
| 476 |
metaclass.py
| 252 | 0.59428 | 0.585805 | 0 | 83 | 21.746988 | 154 |
Zapix/gym-backend
| 1,898,375,569,024 |
117deed2b6cd7e89449ece003da80e29d38daea7
|
7c154a88605bf183c3134708d4f5d4de87c0c92f
|
/gym_backend/exercises/migrations/0001_initial.py
|
da1254ef04e321809f9118c20dcafca66a4d9f68
|
[] |
no_license
|
https://github.com/Zapix/gym-backend
|
faeae5f8e6447e47b6da1d69ff80585fdb2381f4
|
e75cbe9fae9ff4b2bebeabe3113f644003b9aa59
|
refs/heads/master
| 2021-01-01T18:29:44.055588 | 2013-11-22T07:16:35 | 2013-11-22T07:16:35 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'MuscleGroup'
db.create_table(u'exercises_musclegroup', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal(u'exercises', ['MuscleGroup'])
# Adding model 'Exercise'
db.create_table(u'exercises_exercise', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('muscle_group', self.gf('django.db.models.fields.related.ForeignKey')(related_name='exercise_set', to=orm['exercises.MuscleGroup'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('description', self.gf('django.db.models.fields.TextField')()),
('youtube_link', self.gf('django.db.models.fields.URLField')(max_length=200)),
))
db.send_create_signal(u'exercises', ['Exercise'])
def backwards(self, orm):
# Deleting model 'MuscleGroup'
db.delete_table(u'exercises_musclegroup')
# Deleting model 'Exercise'
db.delete_table(u'exercises_exercise')
models = {
u'exercises.exercise': {
'Meta': {'object_name': 'Exercise'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'muscle_group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exercise_set'", 'to': u"orm['exercises.MuscleGroup']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'youtube_link': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'exercises.musclegroup': {
'Meta': {'ordering': "['id']", 'object_name': 'MuscleGroup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['exercises']
|
UTF-8
|
Python
| false | false | 2,339 |
py
| 19 |
0001_initial.py
| 12 | 0.592988 | 0.584438 | 0 | 53 | 43.150943 | 154 |
TheEnbyperor/racing_sim
| 15,805,479,667,501 |
22df2c29bdf3d0831f62dea4dda9dd07d0b10ee3
|
97fea2ec15c7a000313537c0c2be2cbe11f6fa24
|
/game/wheel.py
|
45758e79e319b58ecb9b4bf19fe83d10ac622a81
|
[] |
no_license
|
https://github.com/TheEnbyperor/racing_sim
|
7ea6c0406728863348a134dd3a64bb1a8a7b24e3
|
1a083fb56792b562f888ea74d53b3eca3b059b3c
|
refs/heads/master
| 2020-04-25T18:19:11.442235 | 2019-03-10T10:21:36 | 2019-03-10T10:21:36 | 172,980,411 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from inputs import devices
stick = devices.gamepads[0]
while True:
events = stick.read()
for event in events:
print(event.ev_type, event.code, event.state)
|
UTF-8
|
Python
| false | false | 174 |
py
| 8 |
wheel.py
| 6 | 0.683908 | 0.678161 | 0 | 8 | 20.75 | 53 |
nub8p/2020-Summer-Jookgorithm
| 7,224,134,992,671 |
c2349685100e98ed4f0f580feb1b8799f2287f95
|
8fe5d1db11f5ed8c27fee2740c5dbe188d09f48b
|
/최민우/[20.07.12] 10825.py
|
d25363173765263d582c2f82b84252a32ff357fe
|
[] |
no_license
|
https://github.com/nub8p/2020-Summer-Jookgorithm
|
ec0f9117be1cbadb1a64bc5a048a9f4efdddd0cd
|
2bf0aeb3a60e05ab3762d0d54fe5f8c5eb2bdc3f
|
refs/heads/master
| 2023-05-05T15:27:31.009599 | 2021-05-03T18:59:20 | 2021-05-03T18:59:20 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
people = []
for i in range(int(input())):
line = input().split()
name = line[0]
kor, eng, math = map(int, line[1:])
people.append({'name': name,
'kor': kor,
'eng': eng,
'math': math})
people.sort(key=lambda x: (-x['kor'], x['eng'], -x['math'], x['name']))
print('\n'.join(i['name'] for i in people))
|
UTF-8
|
Python
| false | false | 373 |
py
| 419 |
[20.07.12] 10825.py
| 345 | 0.466488 | 0.461126 | 0 | 11 | 32.909091 | 71 |
ceos-seo/data_cube_utilities
| 1,537,598,312,469 |
3c05cdbafd0f4389a2ea9cad91e0b0d89d0a1967
|
45ef0f26edc1fd1940267c3836c30e10b2fecd39
|
/data_cube_utilities/dc_baseline.py
|
76ea088c8375a5c5855b6c06b5639c3422e1e4c5
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/ceos-seo/data_cube_utilities
|
1834357348c36c2155968af15ceab87721e43d63
|
1979c2a160f2ad306582c60f9ad6be67dfc353ab
|
refs/heads/master
| 2021-11-05T20:22:41.632698 | 2021-10-25T23:19:56 | 2021-10-29T15:45:10 | 77,079,324 | 16 | 22 |
Apache-2.0
| false | 2021-02-05T19:51:59 | 2016-12-21T19:13:40 | 2021-02-03T16:50:42 | 2021-02-05T19:51:58 | 489 | 9 | 17 | 3 |
Python
| false | false |
import numpy as np
import xarray
import xarray.core.ops as ops
import xarray as xr
from itertools import islice
def _window(seq, n=2):
"""Returns a sliding _window (of width n) over data from the iterable"
" s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ... """
it = iter(seq)
result = tuple(islice(it, n))
if len(result) == n:
yield result
for elem in it:
result = result[1:] + (elem,)
yield result
def _composite_of_first(arrays, reverse=False, name_suffix="_composite"):
#In memory of Rube Goldberg
narrays = arrays.copy(deep=True)
narrays.values = narrays.values[::-1] if reverse else narrays.values
notnulls = [ops.notnull(array) for array in narrays]
first_notnull = ops.argmax(ops.stack(notnulls), axis=0)
composite = np.choose(first_notnull, narrays)
return xr.DataArray(
composite,
coords=[narrays.latitude, narrays.longitude],
dims=['latitude', 'longitude'],
name="{band}{suffix}".format(band=narrays.name, suffix=name_suffix))
def _mosaic(dataset, most_recent_first=False, custom_label="_composite"):
return xr.merge([
_composite_of_first(dataset[variable], reverse=most_recent_first, name_suffix=custom_label)
for variable in dataset.data_vars
])
def _composite_by_average(dataset, custom_label="_composite"):
composite = dataset.mean('time')
return composite
## This should be the the only method called from dc baseline
def generate_baseline(dataset, composite_size=5, mode="average", custom_label=""):
ranges = _window(range(len(dataset.time)), n=composite_size + 1)
reffs = (dataset.isel(time=list(frame)[:-1]) for frame in ranges)
baselines = None
if mode == "average":
baselines = (_composite_by_average(ref, custom_label=custom_label) for ref in reffs)
elif mode == "composite":
baselines = (_mosaic(ref, most_recent_first=True, custom_label=custom_label) for ref in reffs)
baseline = xr.concat(baselines, dim='time')
baseline['time'] = dataset.time[composite_size:]
return baseline
|
UTF-8
|
Python
| false | false | 2,116 |
py
| 67 |
dc_baseline.py
| 60 | 0.65879 | 0.653119 | 0 | 59 | 34.864407 | 102 |
CodeLogist/RAdam-PLSTM-FX-Trader
| 2,645,699,879,420 |
c8cf84b287bd890db802449599a8373a3819cff9
|
72789db2e7d27995fd364841d33808cdf4326da0
|
/data.py
|
de171f00ac8210e5ff182d5caf55df67a6f8c924
|
[] |
no_license
|
https://github.com/CodeLogist/RAdam-PLSTM-FX-Trader
|
3a8f5fa0c2a969d5f0a56959401d75a6b35956d5
|
a38c25a0e49d50a35bd586c15c2b3a10b293b648
|
refs/heads/master
| 2021-05-18T19:03:47.494160 | 2020-03-30T17:02:45 | 2020-03-30T17:02:45 | 251,371,181 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import pandas as pd
import os
from datetime import datetime
from sklearn.preprocessing import MinMaxScaler
import joblib  # sklearn.externals.joblib has been removed from recent scikit-learn releases
output_scale = 100
def get_day(x):
try:
y = datetime.strptime(x,'%Y/%m/%d')
except:
try:
y = datetime.strptime(x,'%m/%d/%Y')
except:
y = datetime.strptime(x,'%d-%m-%y')
y = str(y).split(' ')[0]
return y
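# e.g. get_day('2020/03/30') and get_day('30-03-20') both return '2020-03-30'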
def split(X, Y, ratio):
"""
Split in train and test test set
"""
train_len = int(len(X) * ratio)
trainX = X[:train_len]
trainY = Y[:train_len]
testX = X[train_len:]
testY = Y[train_len:]
return trainX, trainY, testX, testY
def create_dataset(dataframe, win_size, delta_t, scaler_path, save_scaler = True):
dataframe.reset_index(drop=True,inplace=True)
data = dataframe[['toHigh','toLow','toClose','Ticks','Spread',\
'Date','Seconds']]
# Disable SettingWithCopyWarning
pd.options.mode.chained_assignment = None
data['Hour'] = dataframe['Seconds'].apply(lambda x: int(str(x).split(':')[0])/23.)
data['Date'] = dataframe['Date'].apply(lambda x: get_day(x))
data['Day'] = data['Date'].apply(lambda x: int(datetime.strptime(x, '%Y-%m-%d').weekday())/6)
data['Month'] = data['Date'].apply(lambda x: (int(x.split('-')[1])-1)/11)
data = data.drop(['Date','Seconds'],axis=1).values
pd.options.mode.chained_assignment = 'warn'
if save_scaler:
# Create, set-up and save scaler
scaler = MinMaxScaler(feature_range=(0, 1))
scaler.fit(data)
joblib.dump(scaler, scaler_path)
else:
# load scaler
scaler = joblib.load(scaler_path)
# scale data
data = scaler.transform(data)
#outputs
hilo = dataframe[['toHigh','toLow']].values*output_scale
dataX, dataY = [], []
for i in range(0, len(data) - win_size, delta_t):
a = data[i:(i+win_size), :]
dataX.append(a)
dataY.append(hilo[i + win_size])
return np.array(dataX), np.array(dataY)
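# For reference: dataX has shape (num_windows, win_size, 8 features) and dataY has
# shape (num_windows, 2) -- the toHigh/toLow of the bar immediately after each window,
# scaled by output_scale.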
def get_win(df,win_size,scaler):
df = df.tail(win_size)
# Disable SettingWithCopyWarning
pd.options.mode.chained_assignment = None
data = df[['toHigh','toLow','toClose','Ticks','Spread']]
# Convert datetime
data.loc[:,'Hour'] = df.loc[:,'Datetime'].apply(lambda x: x.hour/23)
data.loc[:,'Weekday'] = df.loc[:,'Datetime'].apply(lambda x: int(x.weekday())/6)
data.loc[:,'Month'] = df.loc[:,'Datetime'].apply(lambda x: (x.month-1)/11)
pd.options.mode.chained_assignment = 'warn'
data = scaler.transform(data)
return np.array(data)
|
UTF-8
|
Python
| false | false | 2,741 |
py
| 7 |
data.py
| 6 | 0.582999 | 0.574973 | 0 | 80 | 32.2625 | 100 |
FrankSauve/390AntennaPod
| 4,483,945,899,145 |
df4f4079423d660944750cc3295ae579baaca1e4
|
bfdf7c033eec65e7d7393145c809d643cf0824e2
|
/contributers.template.py
|
3127baa0c063dfccb2dcd064bd91e2609b1e0bec
|
[
"MIT"
] |
permissive
|
https://github.com/FrankSauve/390AntennaPod
|
5c790aa58ccd7e06fe5772ca10de00504a13b44a
|
dbed5b9841fbfb32ab51b61ab6b83a1950c83796
|
refs/heads/develop
| 2021-09-12T18:13:27.484257 | 2018-04-19T14:47:21 | 2018-04-19T14:47:21 | 116,841,114 | 3 | 1 |
MIT
| false | 2019-03-17T20:50:07 | 2018-01-09T16:28:34 | 2018-11-20T12:37:32 | 2018-04-19T19:09:52 | 22,758 | 3 | 1 | 2 |
Java
| false | null |
#!/usr/bin/env python3
import requests
import subprocess
TRANSIFEX_USER = ""
TRANSIFEX_PW = ""
print('DEVELOPERS\n==========\n')
p = subprocess.Popen("git log --format='%aN' | sort -fu", shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in p.stdout.readlines():
output = line.decode()
print(output, end='')
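# The git command above lists each commit author once, sorted case-insensitively (-fu).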
language_codes = {
"af_NA": "Afrikaans (Namibia)",
"af_ZA": "Afrikaans (South Africa)",
"af": "Afrikaans",
"ak_GH": "Akan (Ghana)",
"ak": "Akan",
"sq_AL": "Albanian (Albania)",
"sq": "Albanian",
"am_ET": "Amharic (Ethiopia)",
"am": "Amharic",
"ar_DZ": "Arabic (Algeria)",
"ar_BH": "Arabic (Bahrain)",
"ar_EG": "Arabic (Egypt)",
"ar_IQ": "Arabic (Iraq)",
"ar_JO": "Arabic (Jordan)",
"ar_KW": "Arabic (Kuwait)",
"ar_LB": "Arabic (Lebanon)",
"ar_LY": "Arabic (Libya)",
"ar_MA": "Arabic (Morocco)",
"ar_OM": "Arabic (Oman)",
"ar_QA": "Arabic (Qatar)",
"ar_SA": "Arabic (Saudi Arabia)",
"ar_SD": "Arabic (Sudan)",
"ar_SY": "Arabic (Syria)",
"ar_TN": "Arabic (Tunisia)",
"ar_AE": "Arabic (United Arab Emirates)",
"ar_YE": "Arabic (Yemen)",
"ar": "Arabic",
"hy_AM": "Armenian (Armenia)",
"hy": "Armenian",
"as_IN": "Assamese (India)",
"as": "Assamese",
"asa_TZ": "Asu (Tanzania)",
"asa": "Asu",
"az_Cyrl": "Azerbaijani (Cyrillic)",
"az_Cyrl_AZ": "Azerbaijani (Cyrillic, Azerbaijan)",
"az_Latn": "Azerbaijani (Latin)",
"az_Latn_AZ": "Azerbaijani (Latin, Azerbaijan)",
"az": "Azerbaijani",
"bm_ML": "Bambara (Mali)",
"bm": "Bambara",
"eu_ES": "Basque (Spain)",
"eu": "Basque",
"be_BY": "Belarusian (Belarus)",
"be": "Belarusian",
"bem_ZM": "Bemba (Zambia)",
"bem": "Bemba",
"bez_TZ": "Bena (Tanzania)",
"bez": "Bena",
"bn_BD": "Bengali (Bangladesh)",
"bn_IN": "Bengali (India)",
"bn": "Bengali",
"bs_BA": "Bosnian (Bosnia and Herzegovina)",
"bs": "Bosnian",
"bg_BG": "Bulgarian (Bulgaria)",
"bg": "Bulgarian",
"my_MM": "Burmese (Myanmar [Burma])",
"my": "Burmese",
"ca_ES": "Catalan (Spain)",
"ca": "Catalan",
"tzm_Latn": "Central Morocco Tamazight (Latin)",
"tzm_Latn_MA": "Central Morocco Tamazight (Latin, Morocco)",
"tzm": "Central Morocco Tamazight",
"chr_US": "Cherokee (United States)",
"chr": "Cherokee",
"cgg_UG": "Chiga (Uganda)",
"cgg": "Chiga",
"zh": "Chinese (Simplified)",
"zh_CN": "Chinese (China)",
"zh_HK": "Chinese (Hong Kong SAR China)",
"zh_MO": "Chinese (Macau SAR China)",
"zh_MO_SG": "Chinese (Singapore)",
"zh_TW": "Chinese (Taiwan)",
"zh": "Chinese",
"kw_GB": "Cornish (United Kingdom)",
"kw": "Cornish",
"hr_HR": "Croatian (Croatia)",
"hr": "Croatian",
"cs_CZ": "Czech (Czech Republic)",
"cs": "Czech",
"da_DK": "Danish (Denmark)",
"da": "Danish",
"nl_BE": "Dutch (Belgium)",
"nl_NL": "Dutch (Netherlands)",
"nl": "Dutch",
"ebu_KE": "Embu (Kenya)",
"ebu": "Embu",
"en_AS": "English (American Samoa)",
"en_AU": "English (Australia)",
"en_BE": "English (Belgium)",
"en_BZ": "English (Belize)",
"en_BW": "English (Botswana)",
"en_CA": "English (Canada)",
"en_GU": "English (Guam)",
"en_HK": "English (Hong Kong SAR China)",
"en_IN": "English (India)",
"en_IE": "English (Ireland)",
"en_JM": "English (Jamaica)",
"en_MT": "English (Malta)",
"en_MH": "English (Marshall Islands)",
"en_MU": "English (Mauritius)",
"en_NA": "English (Namibia)",
"en_NZ": "English (New Zealand)",
"en_MP": "English (Northern Mariana Islands)",
"en_PK": "English (Pakistan)",
"en_PH": "English (Philippines)",
"en_SG": "English (Singapore)",
"en_ZA": "English (South Africa)",
"en_TT": "English (Trinidad and Tobago)",
"en_UM": "English (U.S. Minor Outlying Islands)",
"en_VI": "English (U.S. Virgin Islands)",
"en_GB": "English (United Kingdom)",
"en_US": "English (United States)",
"en_ZW": "English (Zimbabwe)",
"en": "English",
"eo": "Esperanto",
"et_EE": "Estonian (Estonia)",
"et": "Estonian",
"ee_GH": "Ewe (Ghana)",
"ee_TG": "Ewe (Togo)",
"ee": "Ewe",
"fo_FO": "Faroese (Faroe Islands)",
"fo": "Faroese",
"fil_PH": "Filipino (Philippines)",
"fil": "Filipino",
"fi_FI": "Finnish (Finland)",
"fi": "Finnish",
"fr_BE": "French (Belgium)",
"fr_BJ": "French (Benin)",
"fr_BF": "French (Burkina Faso)",
"fr_BI": "French (Burundi)",
"fr_CM": "French (Cameroon)",
"fr_CA": "French (Canada)",
"fr_CF": "French (Central African Republic)",
"fr_TD": "French (Chad)",
"fr_KM": "French (Comoros)",
"fr_CG": "French (Congo - Brazzaville)",
"fr_CD": "French (Congo - Kinshasa)",
"fr_CI": "French (Côte d’Ivoire)",
"fr_DJ": "French (Djibouti)",
"fr_GQ": "French (Equatorial Guinea)",
"fr_FR": "French (France)",
"fr_GA": "French (Gabon)",
"fr_GP": "French (Guadeloupe)",
"fr_GN": "French (Guinea)",
"fr_LU": "French (Luxembourg)",
"fr_MG": "French (Madagascar)",
"fr_ML": "French (Mali)",
"fr_MQ": "French (Martinique)",
"fr_MC": "French (Monaco)",
"fr_NE": "French (Niger)",
"fr_RW": "French (Rwanda)",
"fr_RE": "French (Réunion)",
"fr_BL": "French (Saint Barthélemy)",
"fr_MF": "French (Saint Martin)",
"fr_SN": "French (Senegal)",
"fr_CH": "French (Switzerland)",
"fr_TG": "French (Togo)",
"fr": "French",
"ff_SN": "Fulah (Senegal)",
"ff": "Fulah",
"gl_ES": "Galician (Spain)",
"gl": "Galician",
"lg_UG": "Ganda (Uganda)",
"lg": "Ganda",
"ka_GE": "Georgian (Georgia)",
"ka": "Georgian",
"de_AT": "German (Austria)",
"de_BE": "German (Belgium)",
"de_DE": "German (Germany)",
"de_LI": "German (Liechtenstein)",
"de_LU": "German (Luxembourg)",
"de_CH": "German (Switzerland)",
"de": "German",
"el_CY": "Greek (Cyprus)",
"el_GR": "Greek (Greece)",
"el": "Greek",
"gu_IN": "Gujarati (India)",
"gu": "Gujarati",
"guz_KE": "Gusii (Kenya)",
"guz": "Gusii",
"ha_Latn": "Hausa (Latin)",
"ha_Latn_GH": "Hausa (Latin, Ghana)",
"ha_Latn_NE": "Hausa (Latin, Niger)",
"ha_Latn_NG": "Hausa (Latin, Nigeria)",
"ha": "Hausa",
"haw_US": "Hawaiian (United States)",
"haw": "Hawaiian",
"he_IL": "Hebrew (Israel)",
"he": "Hebrew",
"hi_IN": "Hindi (India)",
"hi": "Hindi",
"hu_HU": "Hungarian (Hungary)",
"hu": "Hungarian",
"is_IS": "Icelandic (Iceland)",
"is": "Icelandic",
"ig_NG": "Igbo (Nigeria)",
"ig": "Igbo",
"id_ID": "Indonesian (Indonesia)",
"id": "Indonesian",
"ga_IE": "Irish (Ireland)",
"ga": "Irish",
"it_IT": "Italian (Italy)",
"it_CH": "Italian (Switzerland)",
"it": "Italian",
"ja_JP": "Japanese (Japan)",
"ja": "Japanese",
"kea_CV": "Kabuverdianu (Cape Verde)",
"kea": "Kabuverdianu",
"kab_DZ": "Kabyle (Algeria)",
"kab": "Kabyle",
"kl_GL": "Kalaallisut (Greenland)",
"kl": "Kalaallisut",
"kln_KE": "Kalenjin (Kenya)",
"kln": "Kalenjin",
"kam_KE": "Kamba (Kenya)",
"kam": "Kamba",
"kn_IN": "Kannada (India)",
"kn": "Kannada",
"kk_Cyrl": "Kazakh (Cyrillic)",
"kk_Cyrl_KZ": "Kazakh (Cyrillic, Kazakhstan)",
"kk": "Kazakh",
"km_KH": "Khmer (Cambodia)",
"km": "Khmer",
"ki_KE": "Kikuyu (Kenya)",
"ki": "Kikuyu",
"rw_RW": "Kinyarwanda (Rwanda)",
"rw": "Kinyarwanda",
"kok_IN": "Konkani (India)",
"kok": "Konkani",
"ko_KR": "Korean (South Korea)",
"ko": "Korean",
"khq_ML": "Koyra Chiini (Mali)",
"khq": "Koyra Chiini",
"ses_ML": "Koyraboro Senni (Mali)",
"ses": "Koyraboro Senni",
"lag_TZ": "Langi (Tanzania)",
"lag": "Langi",
"lv_LV": "Latvian (Latvia)",
"lv": "Latvian",
"lt_LT": "Lithuanian (Lithuania)",
"lt": "Lithuanian",
"luo_KE": "Luo (Kenya)",
"luo": "Luo",
"luy_KE": "Luyia (Kenya)",
"luy": "Luyia",
"mk_MK": "Macedonian (Macedonia)",
"mk": "Macedonian",
"jmc_TZ": "Machame (Tanzania)",
"jmc": "Machame",
"kde_TZ": "Makonde (Tanzania)",
"kde": "Makonde",
"mg_MG": "Malagasy (Madagascar)",
"mg": "Malagasy",
"ms_BN": "Malay (Brunei)",
"ms_MY": "Malay (Malaysia)",
"ms": "Malay",
"ml_IN": "Malayalam (India)",
"ml": "Malayalam",
"mt_MT": "Maltese (Malta)",
"mt": "Maltese",
"gv_GB": "Manx (United Kingdom)",
"gv": "Manx",
"mr_IN": "Marathi (India)",
"mr": "Marathi",
"mas_KE": "Masai (Kenya)",
"mas_TZ": "Masai (Tanzania)",
"mas": "Masai",
"mer_KE": "Meru (Kenya)",
"mer": "Meru",
"mfe_MU": "Morisyen (Mauritius)",
"mfe": "Morisyen",
"naq_NA": "Nama (Namibia)",
"naq": "Nama",
"ne_IN": "Nepali (India)",
"ne_NP": "Nepali (Nepal)",
"ne": "Nepali",
"nd_ZW": "North Ndebele (Zimbabwe)",
"nd": "North Ndebele",
"no": "Norwegian",
"nb_NO": "Norwegian Bokmål (Norway)",
"nb": "Norwegian Bokmål",
"nn_NO": "Norwegian Nynorsk (Norway)",
"nn": "Norwegian Nynorsk",
"nyn_UG": "Nyankole (Uganda)",
"nyn": "Nyankole",
"or_IN": "Oriya (India)",
"or": "Oriya",
"om_ET": "Oromo (Ethiopia)",
"m_KE": "Oromo (Kenya)",
"om": "Oromo",
"ps_AF": "Pashto (Afghanistan)",
"ps": "Pashto",
"fa_AF": "Persian (Afghanistan)",
"fa_IR": "Persian (Iran)",
"fa": "Persian",
"pl_PL": "Polish (Poland)",
"pl": "Polish",
"pt_BR": "Portuguese (Brazil)",
"pt_GW": "Portuguese (Guinea-Bissau)",
"pt_MZ": "Portuguese (Mozambique)",
"pt_PT": "Portuguese (Portugal)",
"pt": "Portuguese",
"pa_Arab": "Punjabi (Arabic)",
"pa_Arab_PK": "Punjabi (Arabic, Pakistan)",
"pa_Guru": "Punjabi (Gurmukhi)",
"pa_Guru_IN": "Punjabi (Gurmukhi, India)",
"pa": "Punjabi",
"ro_MD": "Romanian (Moldova)",
"ro_RO": "Romanian (Romania)",
"ro": "Romanian",
"rm_CH": "Romansh (Switzerland)",
"rm": "Romansh",
"rof_TZ": "Rombo (Tanzania)",
"rof": "Rombo",
"ru_MD": "Russian (Moldova)",
"ru_RU": "Russian (Russia)",
"ru_UA": "Russian (Ukraine)",
"ru": "Russian",
"rwk_TZ": "Rwa (Tanzania)",
"rwk": "Rwa",
"saq_KE": "Samburu (Kenya)",
"saq": "Samburu",
"sg_CF": "Sango (Central African Republic)",
"sg": "Sango",
"seh_MZ": "Sena (Mozambique)",
"seh": "Sena",
"sr_Cyrl": "Serbian (Cyrillic)",
"sr_Cyrl_BA": "Serbian (Cyrillic, Bosnia and Herzegovina)",
"sr_Cyrl_ME": "Serbian (Cyrillic, Montenegro)",
"sr_Cyrl_RS": "Serbian (Cyrillic, Serbia)",
"sr_Latn": "Serbian (Latin)",
"sr_Latn_BA": "Serbian (Latin, Bosnia and Herzegovina)",
"sr_Latn_ME": "Serbian (Latin, Montenegro)",
"sr_Latn_RS": "Serbian (Latin, Serbia)",
"sr": "Serbian",
"sn_ZW": "Shona (Zimbabwe)",
"sn": "Shona",
"ii_CN": "Sichuan Yi (China)",
"ii": "Sichuan Yi",
"si_LK": "Sinhala (Sri Lanka)",
"si": "Sinhala",
"sk_SK": "Slovak (Slovakia)",
"sk": "Slovak",
"sl_SI": "Slovenian (Slovenia)",
"sl": "Slovenian",
"xog_UG": "Soga (Uganda)",
"xog": "Soga",
"so_DJ": "Somali (Djibouti)",
"so_ET": "Somali (Ethiopia)",
"so_KE": "Somali (Kenya)",
"so_SO": "Somali (Somalia)",
"so": "Somali",
"es_AR": "Spanish (Argentina)",
"es_BO": "Spanish (Bolivia)",
"es_CL": "Spanish (Chile)",
"es_CO": "Spanish (Colombia)",
"es_CR": "Spanish (Costa Rica)",
"es_DO": "Spanish (Dominican Republic)",
"es_EC": "Spanish (Ecuador)",
"es_SV": "Spanish (El Salvador)",
"es_GQ": "Spanish (Equatorial Guinea)",
"es_GT": "Spanish (Guatemala)",
"es_HN": "Spanish (Honduras)",
"es_419": "Spanish (Latin America)",
"es_MX": "Spanish (Mexico)",
"es_NI": "Spanish (Nicaragua)",
"es_PA": "Spanish (Panama)",
"es_PY": "Spanish (Paraguay)",
"es_PE": "Spanish (Peru)",
"es_PR": "Spanish (Puerto Rico)",
"es_ES": "Spanish (Spain)",
"es_US": "Spanish (United States)",
"es_UY": "Spanish (Uruguay)",
"es_VE": "Spanish (Venezuela)",
"es": "Spanish",
"sw_KE": "Swahili (Kenya)",
"sw_TZ": "Swahili (Tanzania)",
"sw": "Swahili",
"sv_FI": "Swedish (Finland)",
"sv_SE": "Swedish (Sweden)",
"sv": "Swedish",
"gsw_CH": "Swiss German (Switzerland)",
"gsw": "Swiss German",
"shi_Latn": "Tachelhit (Latin)",
"shi_Latn_MA": "Tachelhit (Latin, Morocco)",
"shi_Tfng": "Tachelhit (Tifinagh)",
"shi_Tfng_MA": "Tachelhit (Tifinagh, Morocco)",
"shi": "Tachelhit",
"dav_KE": "Taita (Kenya)",
"dav": "Taita",
"ta_IN": "Tamil (India)",
"ta_LK": "Tamil (Sri Lanka)",
"ta": "Tamil",
"te_IN": "Telugu (India)",
"te": "Telugu",
"teo_KE": "Teso (Kenya)",
"teo_UG": "Teso (Uganda)",
"teo": "Teso",
"th_TH": "Thai (Thailand)",
"th": "Thai",
"bo_CN": "Tibetan (China)",
"bo_IN": "Tibetan (India)",
"bo": "Tibetan",
"ti_ER": "Tigrinya (Eritrea)",
"ti_ET": "Tigrinya (Ethiopia)",
"ti": "Tigrinya",
"to_TO": "Tonga (Tonga)",
"to": "Tonga",
"tr_TR": "Turkish (Turkey)",
"tr": "Turkish",
"uk_UA": "Ukrainian (Ukraine)",
"uk": "Ukrainian",
"ur_IN": "Urdu (India)",
"ur_PK": "Urdu (Pakistan)",
"ur": "Urdu",
"uz_Arab": "Uzbek (Arabic)",
"uz_Arab_AF": "Uzbek (Arabic, Afghanistan)",
"uz_Cyrl": "Uzbek (Cyrillic)",
"uz_Cyrl_UZ": "Uzbek (Cyrillic, Uzbekistan)",
"uz_Latn": "Uzbek (Latin)",
"uz_Latn_UZ": "Uzbek (Latin, Uzbekistan)",
"uz": "Uzbek",
"vi_VN": "Vietnamese (Vietnam)",
"vi": "Vietnamese",
"vun_TZ": "Vunjo (Tanzania)",
"vun": "Vunjo",
"cy_GB": "Welsh (United Kingdom)",
"cy": "Welsh",
"yo_NG": "Yoruba (Nigeria)",
"yo": "Yoruba",
"zu_ZA": "Zulu (South Africa)",
"zu": "Zulu"
}
print('\n\nTRANSLATORS\n===========\n')
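# Query the Transifex API for each project language and print its contributors (coordinators, reviewers and translators), sorted case-insensitively.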
r = requests.get('http://www.transifex.com/api/2/project/antennapod/languages/', auth=(TRANSIFEX_USER, TRANSIFEX_PW))
for lang in r.json():
lang_contributers = lang['coordinators'] + lang['reviewers'] + lang['translators']
lang_contributers = sorted(lang_contributers, key=str.lower)
print(language_codes[lang['language_code']], ": ", ', '.join(lang_contributers), sep="")
|
UTF-8
|
Python
| false | false | 14,394 |
py
| 461 |
contributers.template.py
| 358 | 0.529645 | 0.529297 | 0 | 454 | 30.687225 | 119 |
juancordoba00/carsapp
| 6,665,789,292,244 |
ed5c3975a1538dab371199856800e84145d82928
|
64204193d70c5ab855dc2d591ac656d5557f4b91
|
/carsapp/migrations/0023_auto_20210705_1940.py
|
0e01364c12993427b06d6fc917b680f245024541
|
[] |
no_license
|
https://github.com/juancordoba00/carsapp
|
563150e5c381a10fd4cbac23bf3b58d69de4516c
|
38d404a34cdaf5676c83ed6dcb0442e32a12a865
|
refs/heads/master
| 2023-06-16T22:49:43.558107 | 2021-07-07T15:20:37 | 2021-07-07T15:20:37 | 372,249,300 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Generated by Django 3.1.4 on 2021-07-06 00:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('carsapp', '0022_auto_20210705_1437'),
]
operations = [
migrations.AlterField(
model_name='detallesfacturas',
name='precio',
field=models.IntegerField(default=0, null=True),
),
migrations.AlterField(
model_name='servicios',
name='valor_Servicio',
field=models.IntegerField(default=0),
),
]
|
UTF-8
|
Python
| false | false | 569 |
py
| 55 |
0023_auto_20210705_1940.py
| 26 | 0.579965 | 0.521968 | 0 | 23 | 23.73913 | 60 |
sanshitangmalaban/GraduationProject
| 19,224,273,655,630 |
d83965fb1fcb877c2f587d42b3725535fbd5a37d
|
6c75f452e81c9b8106d4aafe02f16b76cfac0bd2
|
/MyTest/tests.py
|
e4c241a501f00d169a6dcae330585a4716150d47
|
[] |
no_license
|
https://github.com/sanshitangmalaban/GraduationProject
|
4aebe6226c1251a7ca4b2deddcf69a13f799e9be
|
b47ce4dc4a490017072c785261ccf0421500a4b0
|
refs/heads/master
| 2020-03-24T18:20:52.697395 | 2018-07-30T15:03:22 | 2018-07-30T15:03:22 | 142,889,763 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.test import TestCase
import sys,os
# Create your tests here.
# import time
# print(int(time.time()))
# url = '/images/ysu_logo.png'
# url_list = url.split('/')
# print(url_list)
import requests,json
from urllib.parse import quote
# print(os.path.dirname(os.path.abspath(__file__)))
# print(sys.path.append(os.path.dirname(os.path.abspath(__file__))))
token = ""  # note: `token` was never defined in the original file; supply a valid manmanbuy token before running
def get_goods_history(goods_url):
    url = "http://tool.manmanbuy.com/history.aspx?DA=1&action=gethistory&url=" + quote(goods_url, safe='') + "&bjid=&spbh=&cxid=&zkid=&w=951&token=" + token
    print(url)
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.167 Safari/537.36",
"referer": "www.manmanbuy.com",
}
response = requests.request('get', url=url, headers=headers)
response_json = json.loads(response.text)
print(response_json)
if __name__ =="__main__":
get_goods_history("http://product.dangdang.com/1005161885.html")
|
UTF-8
|
Python
| false | false | 976 |
py
| 17 |
tests.py
| 12 | 0.671107 | 0.620902 | 0 | 34 | 27.735294 | 154 |
ryanmuller/quill-lightside
| 1,511,828,508,187 |
d0c28c04bab60b0e4fdf6d4c53085b6c6af95853
|
f88750fd480330bf332b3567c54a4761899a957a
|
/lightbox.py
|
5be44d5895224f8393b8f2240b6310e38427e58e
|
[] |
no_license
|
https://github.com/ryanmuller/quill-lightside
|
ca76c37242c475fd4e37cbc467185960bfac3163
|
56f958933017eba1a9b06ed9a6ef916a33297488
|
refs/heads/master
| 2020-05-17T11:06:07.596998 | 2014-12-28T21:13:55 | 2014-12-28T21:13:55 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import requests
import urlparse
import json
import random
import operator
token = ""
with open('config/token', 'r') as f:
token = f.read().strip()
prompts = [
"Why did the United States declare independence?",
"How did the moon form?",
"In Macbeth, whose ambition is the driving force of the play?",
"How does Tom Sawyer change over the course of the story?",
"What is Gestalt psychology?"
]
class LightboxResource(object):
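    # Thin wrapper around the Lightbox REST API: subclasses set `endpoint` and reuse the classmethod helpers (all/find/create) below.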
base_url = "https://api.getlightbox.com/api/"
endpoint = None
HEADERS = {
"Authorization": "Token {token}".format(token=token),
"Content-Type": "application/json",
}
@staticmethod
def endpoint_for(resource):
return urlparse.urljoin(LightboxResource.base_url, resource+"/")
@staticmethod
def id_from_url(url):
return url.split('/')[-1]
@classmethod
def url_for(cls, _id):
return urlparse.urljoin(cls.endpoint, str(_id))
# TODO fail elegantly
@classmethod
def all(cls):
r = requests.get(cls.endpoint, headers=cls.HEADERS)
response = r.json()
objs = [ cls(cls.id_from_url(obj['url']), obj) for obj in response['results'] ]
return objs
@classmethod
def find(cls, _id):
r = requests.get(cls.url_for(_id), headers=cls.HEADERS)
obj = cls(_id, r.json())
return obj
@classmethod
def create(cls, data):
r = requests.post(cls.endpoint, data=json.dumps(data), headers=cls.HEADERS)
response = r.json()
obj = cls(cls.id_from_url(response['url']), response)
return obj
# TODO test me
def reload(self):
self = type(self).find(self._id)
return self
# TODO inherit only from Tasks
# TODO better info from status
def process(self):
if 'process' in self.response:
r = requests.post(self.response['process'], headers=type(self).HEADERS)
return r.status_code == 200 or r.status_code == 202
else:
return False
def url(self):
return type(self).url_for(self._id)
def destroy(self):
r = requests.delete(self.url(), headers=type(self).HEADERS)
return self
def __init__(self, _id, response={}):
self._id = _id
self.response = response
class Prompt(LightboxResource):
endpoint = LightboxResource.endpoint_for("prompts")
@classmethod
def create(cls, title="", text="", description=""):
return super(Prompt, cls).create({
'title': title,
'text': text,
'description': description
})
def text(self):
return self.response['text']
class Grader(LightboxResource):
endpoint = LightboxResource.endpoint_for("graders")
@classmethod
def create(cls, prompt_id, name="Grader"):
return super(Grader, cls).create({
'prompt': Prompt.url_for(prompt_id),
'name': name
})
def prompt(self):
p = self.response['prompt']
return Prompt(LightboxResource.id_from_url(p['url']), p)
class Lightbox(LightboxResource):
endpoint = LightboxResource.endpoint_for("lightboxes")
@classmethod
def create(cls, grader_id, name="Lightbox"):
return super(Lightbox, cls).create({
'grader': Grader.url_for(grader_id),
'name': name
})
# TODO set in __init__?
def name(self):
return self.response['name']
def grader(self):
return Grader.find(LightboxResource.id_from_url(self.response['grader']))
def answer_set(self):
aset = self.response['answer_set']
return AnswerSet(LightboxResource.id_from_url(aset['url']), aset)
class Corpus(LightboxResource):
endpoint = LightboxResource.endpoint_for("corpora")
params_endpoint = LightboxResource.endpoint_for("corpus-upload-parameters")
s3_params = {}
# note: description is required
@classmethod
def create(cls, prompt_id=None, description="primary corpus"):
if not prompt_id:
return None
return super(Corpus, cls).create({
'prompt': Prompt.url_for(prompt_id),
'description': description
})
@classmethod
def get_s3_params(cls):
r = requests.get(cls.params_endpoint, headers=cls.HEADERS)
cls.s3_params = r.json()
@classmethod
def send_file(cls, filename):
cls.get_s3_params()
data = {
'AWSAccessKeyId': cls.s3_params['access_key_id'],
'key': cls.s3_params['key'],
'acl': 'public-read',
'Policy': cls.s3_params['policy'],
'Signature': cls.s3_params['signature'],
'success_action_status': '201',
}
files = { 'file': open(filename, 'rb') }
r = requests.post(cls.s3_params['s3_endpoint'], data=data, files=files)
return r.status_code == 201
class CorpusUploadTask(LightboxResource):
endpoint = LightboxResource.endpoint_for("corpus-upload-tasks")
@classmethod
def create(cls, corpus_id, s3_key, content_type="text/csv"):
if not s3_key and 'key' in Corpus.s3_params:
s3_key = Corpus.s3_params['key']
if not corpus_id or not s3_key:
return None
return super(CorpusUploadTask, cls).create({
'corpus': Corpus.url_for(corpus_id),
's3_key': s3_key,
'content_type': content_type
})
class Author(LightboxResource):
endpoint = LightboxResource.endpoint_for("authors")
@classmethod
def create(cls, designator=("%016x" % random.getrandbits(64)), email=""):
return super(Author, cls).create({
'designator': designator,
'email': email
})
class TrainingAnswer(LightboxResource):
endpoint = LightboxResource.endpoint_for("training-answers")
@classmethod
def create(cls, corpus_id, text):
if not corpus_id or not text:
return None
return super(TrainingAnswer, cls).create({
'corpus': Corpus.url_for(corpus_id),
'text': text
})
class ResolvedScore(LightboxResource):
endpoint = LightboxResource.endpoint_for("resolved-scores")
@classmethod
def create(cls, training_answer_id, label):
if not training_answer_id or not label:
return None
return super(ResolvedScore, cls).create({
'training_answer': TrainingAnswer.url_for(training_answer_id),
'label': label
})
class HumanScore(LightboxResource):
endpoint = LightboxResource.endpoint_for("human-scores")
@classmethod
def create(cls, training_answer_id, label):
if not training_answer_id or not label:
return None
return super(HumanScore, cls).create({
'training_answer': TrainingAnswer.url_for(training_answer_id),
'label': label
})
class TrainingTask(LightboxResource):
endpoint = LightboxResource.endpoint_for("training-tasks")
@classmethod
def create(cls, corpus_id, grader_id):
return super(TrainingTask, cls).create({
'corpus': Corpus.url_for(corpus_id),
'grader': Grader.url_for(grader_id)
})
class TrainedModel(LightboxResource):
endpoint = LightboxResource.endpoint_for("trained-models")
class AnswerSet(LightboxResource):
endpoint = LightboxResource.endpoint_for("answer-sets")
# FIXME
@classmethod
def create(cls, lightbox_id):
return super(AnswerSet, cls).create({
'lightbox': Lightbox.url_for(lightbox_id)
})
class Answer(LightboxResource):
endpoint = LightboxResource.endpoint_for("answers")
@classmethod
def create(cls, author_id, answer_set_id, text):
if not author_id or not answer_set_id or not text:
return None
return super(Answer, cls).create({
'author': Author.url_for(author_id),
'answer_set': AnswerSet.url_for(answer_set_id),
'text': text
})
def prediction_result(self):
if 'prediction_results' in self.response and len(self.response['prediction_results']) > 0:
return PredictionResult.find(LightboxResource.id_from_url(self.response['prediction_results'][0]))
else:
return None
def label(self):
pr = self.prediction_result()
if pr:
return pr.label()
else:
return None
class PredictionTask(LightboxResource):
endpoint = LightboxResource.endpoint_for("prediction-tasks")
@classmethod
def create(cls, answer_set_id):
return super(PredictionTask, cls).create({
'answer_set': AnswerSet.url_for(answer_set_id)
})
# GET /prediction-results/
# GET /prediction-results/(int: prediction_result_id)
class PredictionResult(LightboxResource):
endpoint = LightboxResource.endpoint_for("prediction-results")
# TODO cleanup training data and figure out value=0 issue
def label(self):
distribution = {k:v for (k,v) in self.response['distribution'].iteritems() if k != 'D' and k != '4'}
return max(distribution.iteritems(), key=operator.itemgetter(1))[0]
if __name__ == "__main__":
# get s3 params
#Corpus.get_s3_params()
#print Corpus.s3_params
# create a new prompt
#p = Prompt("test", "This is a test. Don't answer me!", "only a test")
#print p._id
# 35, 36 -> test prompts
#Prompt.find(35)
#p = Prompt.create("test2", "Your answer means nothing, NOTHING!", "yet another test")
Prompt.all()
# A Proclamation About Rosa Parks
# Prompt: 20 (?)
# Grader: 34
# Lightbox: 30
# AnswerSet: 30
# Author: 1
# Answer: 1580
# test
# prompt: 35
# grader: 44
# lightbox: 31
# answerset: 31
# answer: 1581
#
# Water Cycle
# prompt: 37
# grader: 45
# lightbox: 33
# answer-set: 33
# corpus: 164
# training answer: 41503
# resolved score: 41503
# human score: 1
# s3 key: corpus-uploads/2014/10/28/56d59c80102d4e7db5bc5707fce24759.csv
# training task: 96
# trained model: 164
# prediction task: 1584
|
UTF-8
|
Python
| false | false | 10,327 |
py
| 8 |
lightbox.py
| 3 | 0.603079 | 0.589716 | 0 | 350 | 28.502857 | 110 |
Dingrongdeer/X-Village-2018-Exercise
| 3,607,772,539,212 |
3d650702ee34f48dd1a42bd3d2a652bb41c40f69
|
b69ae92ed6bf0d7fd73e73aeb6cdd04e4a024931
|
/Lesson02-Python_basic/ex3.py
|
f90abfada8458201ae6120044931aabba465fa36
|
[] |
no_license
|
https://github.com/Dingrongdeer/X-Village-2018-Exercise
|
2c68706b8e6cdabf967f8d73d35f09c77f8545c8
|
856209c28f82798a2ffeede96669ab703f9950e8
|
refs/heads/master
| 2020-03-23T07:44:45.888142 | 2018-07-17T14:06:23 | 2018-07-17T14:06:23 | 141,288,101 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def guess_luckynumber():
import random
lucky_number=random.randint(1,100)
number=int(input("清輸入終極密碼:"))
while number!= lucky_number:
if number<lucky_number:
print ("再大一點")
number=int(input("Please enter an luncky number:"))
else:
print ("再小一點")
number=int(input("Please enter an luncky number:"))
print ("猜對了!")
guess_luckynumber()
|
UTF-8
|
Python
| false | false | 467 |
py
| 1 |
ex3.py
| 1 | 0.571096 | 0.561772 | 0 | 15 | 27.666667 | 63 |
JoshuaGhost/e2expred
| 5,394,478,939,405 |
0a36d847d1859685dc370564028951dc94cb1229
|
a9a7c00fd287f4c33d58719f8ec53c17f8560486
|
/latent_rationale/common/regularizers.py
|
b5fb2d89b25316e57577023b84988b84084edbf5
|
[
"MIT"
] |
permissive
|
https://github.com/JoshuaGhost/e2expred
|
f0ac2bf66c172aad9b47b6d4ebd9ad1ad7ed489b
|
f4dee47c41748a64509b68daee83d97919b6c978
|
refs/heads/main
| 2023-05-14T17:55:13.365088 | 2021-01-13T17:11:14 | 2021-01-13T17:11:14 | 329,074,920 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import torch
def l0_for_hardkuma(dists, mask):
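    # Expected L0 penalty: probability that each HardKuma gate is non-zero, averaged over tokens and the batch.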
batch_size = mask.size(0)
lengths = mask.sum(1).float()
# pre-compute for regularizers: pdf(0.)
if len(dists) == 1:
pdf0 = dists[0].pdf(0.)
else:
pdf0 = []
for t in range(len(dists)):
pdf_t = dists[t].pdf(0.)
pdf0.append(pdf_t)
pdf0 = torch.stack(pdf0, dim=1) # [B, T, 1]
pdf0 = pdf0.squeeze(-1)
pdf0 = torch.where(mask, pdf0, pdf0.new_zeros([1])) # [B, T]
# L0 regularizer
pdf_nonzero = 1. - pdf0 # [B, T]
pdf_nonzero = torch.where(mask, pdf_nonzero, pdf_nonzero.new_zeros([1]))
l0 = pdf_nonzero.sum(1) / (lengths + 1e-9) # [B]
l0 = l0.sum() / batch_size
return l0
|
UTF-8
|
Python
| false | false | 729 |
py
| 28 |
regularizers.py
| 21 | 0.541838 | 0.496571 | 0 | 26 | 27.076923 | 76 |
Eiriksak/Network_Security
| 3,865,470,594,134 |
9ae42a74ff1b62f22bee91bcec4f82e72ccbb2c4
|
191353ac721faf66d17ae0a281ab9e0436a51e63
|
/Assignment3/test_dsa.py
|
2144bbbe21aca78d71a05291e9bf914c7362bbb3
|
[] |
no_license
|
https://github.com/Eiriksak/Network_Security
|
b9ff2f5356a2c70ceb63ceaefd9a75b6819217a6
|
ada377298cf0f0d550c410f7f511e061cbef0a9b
|
refs/heads/master
| 2023-01-07T00:20:21.748482 | 2020-11-03T19:53:23 | 2020-11-03T19:53:23 | 300,072,354 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import dsa
import argparse
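# Demo: User1 signs a message with DSA and User2 verifies it, optionally with an attacker editing the message in between.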
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--mode', choices=['normal', 'attacker'], required=True, help='Test DSA normally or as an attacker')
args = parser.parse_args()
if args.mode == 'normal':
p, q, g = dsa.gen_pubkeys(L=512) # User 1 generates shared global public key values
x, y = dsa.gen_userkeys(p, q ,g) # User 1 generates private and public keys
message = input("(User1) Message: ")
r, s, message = dsa.sign_message(message, p, q, g, x) # User 1 signs message
        print(f'(User1) has now signed the message and generated these numbers:\nr: {r}\ns: {s}')
# User 2 will now verify the message it receives from User 1
        print(f'\n\n********** (User2) has received the message and will now verify it **********')
verified = dsa.verify_message(message, p, q, g, r, s, y)
if verified:
print("The message was verified!")
else:
print("The message was not verifed..")
elif args.mode == 'attacker':
p, q, g = dsa.gen_pubkeys(L=512) # User 1 generates shared global public key values
x, y = dsa.gen_userkeys(p, q ,g) # User 1 generates private and public keys
message = input("(User1) Message: ")
r, s, message = dsa.sign_message(message, p, q, g, x) # User 1 signs message
        print(f'(User1) has now signed the message and generated these numbers:\nr: {r}\ns: {s}')
delim = '#'*20
        print(f'{delim} ATTACKER INTERRUPTS {delim}\nEdit the original message')
message = input('(Attacker) Edit message: ')
# User 2 will now verify the faked message it receives from the attacker
        print(f'\n\n********** (User2) has received the message and will now verify it **********')
verified = dsa.verify_message(message, p, q, g, r, s, y)
if verified:
print("The message was verified!")
else:
print("The message was not verifed..")
|
UTF-8
|
Python
| false | false | 2,042 |
py
| 21 |
test_dsa.py
| 16 | 0.598433 | 0.587169 | 0 | 41 | 48.658537 | 124 |
morawi/TextGAN
| 3,100,966,416,773 |
16f509a5afb52b503601e0694e395162ce25281d
|
64a9b85e24f8c038384473df988a5a480d8db20b
|
/B_model.py
|
c8fc5834237c3d5d1d9cff4df6c5bb3c5506f926
|
[
"LicenseRef-scancode-other-permissive",
"MIT"
] |
permissive
|
https://github.com/morawi/TextGAN
|
4a8af74e499b89a7ddded8637c53b1611df11b42
|
1633c7cf6ee60c624da6aa2248c41c264225fa8e
|
refs/heads/master
| 2020-05-14T16:44:44.136345 | 2019-08-28T00:46:37 | 2019-08-28T00:46:37 | 181,877,666 | 2 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 17 11:41:03 2019
@author: Mohammed Al-Rawi
"""
from misc_functions import binarize_tensor
def get_loss_of_B_classify(real_A, real_B): # item is a string 'A', 'A_neg' of the sample
optimizer_classify.zero_grad()
B_ = binarize_tensor(G_AB(real_A).detach())
output = B_classify(B_)
target = criterion_classify_labeling(B_, binarize_tensor(real_B))
loss_B = criterion_classify(output, target)
return loss_B
def train_B_classify(no_epochs):
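    # Train B_classify on binarized G_AB outputs; each step combines the +ve and -ve phase losses as min/(max + 1).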
print('Training B-Classifier')
B_classify.train()
loss_of_model = []
# time.sleep(1); # pbar = tqdm(total=no_epochs); # time.sleep(1)
loss = None
for epoch_ in range(1, no_epochs):
scheduler_B.step()
total_loss = 0
# pbar.update(1)
for i, batch in enumerate(dataloader):
real_B_pos = batch['B'].type(Tensor).to(device) # +ve Ground Truth
real_B_neg = batch['B_neg'].type(Tensor).to(device) # -ve Ground Truth
loss_B_pos = get_loss_of_B_classify(batch['A'].type(Tensor).to(device),
real_B_pos) # ''' +ve phase '''
loss_B_neg = get_loss_of_B_classify(batch['A_neg'].type(Tensor).to(device),
real_B_neg) # ''' -ve phase pass '''
loss = torch.min(loss_B_pos, loss_B_neg) / (
torch.max(loss_B_pos, loss_B_neg)+ 1)
loss.backward()
optimizer_classify.step()
total_loss += loss.cpu().data.detach().numpy().tolist()
print(', ', total_loss/len(dataloader.dataset), end='')
loss_of_model.append(total_loss/len(dataloader.dataset))
return loss_of_model
def test_B_classify(test_loss, test_dataloader):
loss = 0
B_classify.eval()
with torch.no_grad():
for batch_idx, batch in enumerate(test_dataloader):
real_B = batch['B'].type(Tensor) # since we are thresholding, there is no difference between B and B_neg
real_A_pos = batch['A'].type(Tensor)
real_A_neg = batch['A_neg'].type(Tensor)
GAN_B_pos = G_AB(real_A_pos).detach()
GAN_B_neg = G_AB(real_A_neg).detach()
out_B_pos = B_classify(binarize_tensor(GAN_B_pos)).detach() # if we do thresholding in training, we should do it here
out_B_neg = B_classify(binarize_tensor(GAN_B_neg)).detach()
if out_B_neg<out_B_pos:
B_good = GAN_B_neg
else: B_good = GAN_B_pos
loss += test_loss(real_B, B_good)
x = G_AB( G_AB(GAN_B_pos) + G_AB(GAN_B_neg ) )
img_sample = torch.cat(
(real_A_pos,
real_B,
GAN_B_pos,
GAN_B_neg,
G_AB(GAN_B_pos),
G_AB(GAN_B_neg),
x
),
0)
save_image(img_sample, 'images/%s/%s.png' %
(opt.dataset_name, batch_idx), nrow=7, normalize=True)
model_id = 1
torch.save(B_classify.state_dict(), 'saved_models/%s/model_classify_%d.pth' % (opt.dataset_name, model_id))
return loss/len(test_dataloader.dataset)
#lr_classify = 0.01
#B_classify = torchvis_models.resnet18(pretrained=True)
#B_classify.fc = torch.nn.Linear(2048, 1) #2048 for 256x256 image
#B_classify.aux_logits = False
#B_classify = B_classify.to(device)
#optimizer_classify = torch.optim.Adam(B_classify.parameters(), lr=lr_classify)
#criterion_classify_labeling = torch.nn.L1Loss()
#criterion_classify = torch.nn.L1Loss()
#scheduler_B = torch.optim.lr_scheduler.MultiStepLR(optimizer_classify,
# milestones=[50, 150, 300] , gamma= 0.1)
# my_loss = train_B_classify(100)
test_performance = test_B_classify(criterion_classify, val_dataloader)
print(test_performance.item())
|
UTF-8
|
Python
| false | false | 4,336 |
py
| 9 |
B_model.py
| 7 | 0.515913 | 0.502306 | 0 | 104 | 40.701923 | 129 |
danieltimko/aoc2020
| 9,431,748,189,525 |
0d9b0f26d355be4808c1b6ca3fd7cf0be277ad7f
|
5a6aa88acebefcb79d7e33ed0898b9f841816119
|
/2021/21.py
|
afd295bfcd248abb0a722ee107670e6be3e980ee
|
[] |
no_license
|
https://github.com/danieltimko/aoc2020
|
396cc7ce1380f30f18dbb3523ad9a7b5025dc34f
|
8961ec2774ed30cb49e8c479e9b6989d31557745
|
refs/heads/master
| 2023-02-07T05:23:01.748630 | 2022-12-25T06:14:02 | 2022-12-25T06:14:02 | 318,085,942 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from itertools import product
def task1(positions):
def roll():
n = 1
while True:
yield n
n = 1 if n == 100 else n+1
scores = [0, 0]
rolls = 0
gen = roll()
while True:
for i in range(2):
rollsum = next(gen) + next(gen) + next(gen)
rolls += 3
positions[i] = (positions[i] + rollsum) % 10
scores[i] += positions[i] + 1
if scores[i] >= 1000:
return rolls * min(scores)
def task2(positions):
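    # Part 2: count the universes each player wins with the Dirac die, memoising results by (positions, scores).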
def get_subresult(pos1, pos2, score1, score2):
if score1 >= 21:
return 1, 0
if score2 >= 21:
return 0, 1
if (pos1, pos2, score1, score2) in cache:
return cache[(pos1, pos2, score1, score2)]
n1 = 0
n2 = 0
for rolls in list(product([1, 2, 3], repeat=3)):
new_pos1 = (pos1 + sum(rolls)) % 10
# switch players every round
subresult2, subresult1 = get_subresult(pos2, new_pos1, score2, score1+new_pos1+1)
n1 += subresult1
n2 += subresult2
cache[(pos1, pos2, score1, score2)] = (n1, n2)
return n1, n2
cache = {}
return max(get_subresult(positions[0], positions[1], 0, 0))
def run():
print(task1([1, 0]))
print(task2([1, 0]))
run()
|
UTF-8
|
Python
| false | false | 1,347 |
py
| 75 |
21.py
| 74 | 0.499629 | 0.437268 | 0 | 52 | 24.903846 | 93 |
aajenkins/LTSPM
| 14,826,227,125,581 |
f4fe4554070b44c0b2e700d016e90bfa26f29804
|
4964e8c02053a9616c3fe4800cacd6d74acb2f34
|
/cofeb_analysis/irmn/image_erosion_stripe_width.py
|
64c6522110b9e9c7c92a291422ced4e22915e88e
|
[] |
no_license
|
https://github.com/aajenkins/LTSPM
|
cbb714c68701fc47f196d29f383fa9fcd6a51f5c
|
75f640e1b2a7173b083690ec4c84337199bd44ab
|
refs/heads/master
| 2020-07-03T15:35:10.578245 | 2018-03-19T21:45:37 | 2018-03-19T21:45:37 | 74,162,494 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# @Author: Jenkins Alec <alec>
# @Date: 2017-02-15T11:26:35-08:00
# @Project: LTSPM analysis
# @Last modified by: alec
# @Last modified time: 2017-02-15T12:01:11-08:00
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import numpy as np
from scipy.ndimage.morphology import binary_erosion
from scipy.misc import imread
scannum = 1836
impath = '/Users/alec/UCSB/scan_images/irmn/domains'+str(scannum)+'contour.png'
domains = imread(impath, flatten=True)
bdomains = 1*domains
bdomains = np.multiply(1/255,domains).astype(int)
number_erosions = 15
image_sum = np.zeros((number_erosions))
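# Erode the binary domain image repeatedly and record how much area survives after each pass.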
for i in range(number_erosions):
image_sum[i] = np.sum(bdomains)
bdomains = binary_erosion(bdomains).astype(np.int)
plt.close('all')
fig1, ax1 = plt.subplots()
plt.imshow(bdomains)
fig1, ax1 = plt.subplots()
plt.plot(image_sum)
plt.show()
|
UTF-8
|
Python
| false | false | 859 |
py
| 154 |
image_erosion_stripe_width.py
| 142 | 0.733411 | 0.67404 | 0 | 34 | 24.264706 | 79 |
styler00dollar/Colab-BasicSR
| 16,741,782,554,234 |
7649bf53ad2b2c5763ff30b3460145cc11fa1683
|
af94f63c4ba68c1e06e2550449569a11ee53bee6
|
/code/scripts/fix_state_dict.py
|
adc8e1eb5f274a918680b2b43764a34d24d5f3cf
|
[] |
no_license
|
https://github.com/styler00dollar/Colab-BasicSR
|
4b79da57a7afaff32bf2d671442a5171e0cc1e87
|
626d93bf8d3d3db5447ae6946d4ba8bb3d00a482
|
refs/heads/master
| 2023-06-13T04:15:05.808917 | 2023-05-27T01:17:56 | 2023-05-27T01:17:56 | 321,114,389 | 4 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import torch
# from this model
model1 = torch.load("team04_rlfn.pth")
# into this model
model2 = torch.load("Checkpoint_0_0_G.pth")
for k in model1.keys():
try:
if "upsampler.0" in k:
continue
print(f"setting {k}")
model2[k] = model1[k]
except Exception as e:
print(e)
pass
torch.save(model2, "fixed.pth")
print("done")
|
UTF-8
|
Python
| false | false | 385 |
py
| 106 |
fix_state_dict.py
| 95 | 0.587013 | 0.558442 | 0 | 21 | 17.333333 | 43 |
stereoliza/learning
| 1,082,331,762,265 |
31a31325c86ce42de44fc7575a92def0b72ef878
|
7918b5e9997744e2a641c231c80dd257ee742460
|
/main.py
|
e7a3602669d3d9221f0d46fac13caae3a28cf828
|
[] |
no_license
|
https://github.com/stereoliza/learning
|
cc72cd5d907ae06e01f73bbc30be3499fcd20e6c
|
6268095ee02c6fcdda54dd86dc26f37a51ffc229
|
refs/heads/main
| 2023-09-06T02:24:00.169886 | 2021-11-11T23:41:24 | 2021-11-11T23:41:24 | 394,315,193 | 0 | 0 | null | false | 2021-11-11T23:41:25 | 2021-08-09T14:07:20 | 2021-10-30T20:22:19 | 2021-11-11T23:41:24 | 33 | 0 | 0 | 0 |
Python
| false | false |
from location import find_location
import sys
ip = sys.argv[1]
location = find_location(str(ip))
print(location)
|
UTF-8
|
Python
| false | false | 119 |
py
| 11 |
main.py
| 10 | 0.731092 | 0.722689 | 0 | 7 | 15.428571 | 34 |
BahruDa/alx-higher_level_programming-2
| 15,367,393,024,180 |
cd7d96d55cb8b434fca5647acb17ec73ede09aa8
|
b7f25449c2baae8823f1307e134a775d6ab64272
|
/0x0F-python-object_relational_mapping/model_state.py
|
ff208ecad1588628c4dad4a3a23774bab485effb
|
[] |
no_license
|
https://github.com/BahruDa/alx-higher_level_programming-2
|
85f5155328750430949d49e8d2d27761c71360fe
|
047d1dd0dec5e942e9c638d0e4cc5fc985061b56
|
refs/heads/main
| 2023-08-15T03:07:14.699713 | 2021-09-22T23:30:52 | 2021-09-22T23:30:52 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python3
"""
a python module that contains the class definition of
a State and an instance Base = declarative_base():
"""
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class State(Base):
'''
defines a state
Attr:
id (int): auto-generated, unique integer, not null, & is a primary key
        name (str): a column of a string with 128 chars and can’t be null
'''
__tablename__ = 'states'
id = Column(Integer, primary_key=True, nullable=False, unique=True)
name = Column(String(128), nullable=False)
def __init__(self, name):
self.name = name
|
UTF-8
|
Python
| false | false | 679 |
py
| 123 |
model_state.py
| 95 | 0.67356 | 0.66322 | 0 | 26 | 25.038462 | 74 |
paulhodge/graveyard
| 2,791,728,745,102 |
a17ec3de4d11d39dde5b6e8c6e885ae0bcf9c060
|
16294f14ad0e8d2e29201fd1c3d132b7793481ca
|
/euler_11.py
|
aba69eb057fd15cdc072c27a6d66fbd35bf2e0a0
|
[] |
no_license
|
https://github.com/paulhodge/graveyard
|
05fc3519a9c26be88947867d55980c8a11a573e7
|
9862a8ddc69bebb8f8df2a57647360d07da34efc
|
refs/heads/master
| 2015-08-06T09:34:28.833025 | 2012-02-02T06:31:00 | 2012-02-02T06:31:00 | 2,037,445 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def quadratic(a, b, c=None):
import math, cmath
if c: # (ax^2 + bx + c = 0)
a, b = b / float(a), c / float(a)
t = a / 2.0
r = t**2 - b
if r >= 0: # real roots
y1 = math.sqrt(r)
else: # complex roots
y1 = cmath.sqrt(r)
y2 = -y1
return y1 - t, y2 - t
def triangle_numbers():
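    # Yields the triangular numbers 1, 3, 6, 10, ...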
i = 1
result = 1
yield result
while True:
i += 1
result += i
yield result
def triangle(i):
out = int(i*i / 2 + i / 2)
if i % 2 == 1:
out = out + 1
return out
print map(triangle, range(1,20))
def find_divisors(n):
result = set()
for d in range(2, n):
if (n / d) * d == n:
result.add(d)
result.add(1)
result.add(n)
return result
def product(l):
out = 1
for i in l:
out *= i
return out
smallest_with_500_divisors = product(range(1,501))
print smallest_with_500_divisors
print quadratic(.5,.5,smallest_with_500_divisors)
def find():
for n in triangle_numbers():
if n < smallest_with_500_divisors:
continue
print n
if len(find_divisors(n)) > 500:
print find_divisors(n)
print n
exit(0)
|
UTF-8
|
Python
| false | false | 1,168 |
py
| 18 |
euler_11.py
| 4 | 0.527397 | 0.486301 | 0 | 60 | 18.45 | 50 |
kimlin20011/data_mining_assignment
| 5,308,579,620,768 |
74e16a6b4a4df6ae972c01703c9a495263bca960
|
c4cbbbeded13f7cfefbd9a40932bc083c6ae0e8d
|
/q2_6/test.py
|
972f2d5b73acab962ad5f0037e51d457ff7d58ff
|
[] |
no_license
|
https://github.com/kimlin20011/data_mining_assignment
|
e0e51d94b5822aa91b5d1c2d2317ba829ad1ef26
|
527463b1005877def75e5100ee9088b423016fb8
|
refs/heads/main
| 2023-06-24T04:54:33.078896 | 2021-07-28T07:08:58 | 2021-07-28T07:08:58 | 389,860,600 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from sklearn import tree
import pandas as pd
import graphviz
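# Fit a decision tree on the 20 feature columns and export it to Graphviz for visualisation.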
df = pd.read_csv("data.csv", encoding="utf-8")
print(df)
features = list(df.columns[1:21])
X = df[features]
y = df["class"]
classifier = tree.DecisionTreeClassifier()
classifier = classifier.fit(X, y)
tree.plot_tree(classifier)
dot_data = tree.export_graphviz(classifier, out_file=None,
feature_names=features,
class_names=["不能打羽球", "可以打球"],
filled=True, rounded=True, leaves_parallel=True)
graph = graphviz.Source(dot_data)
|
UTF-8
|
Python
| false | false | 603 |
py
| 22 |
test.py
| 12 | 0.622222 | 0.615385 | 0 | 19 | 29.789474 | 80 |
kowoolim/youtube
| 15,736,760,181,537 |
c1fb57c8b8b67246e66583bc669d2c50b115faf0
|
8e8d193d504ce8b136efe8d8c451eb00629b9b7c
|
/address/admin.py
|
99e45cb95ba198bb59a88bfd0effea5dfaff6069
|
[] |
no_license
|
https://github.com/kowoolim/youtube
|
b8a84e57f57b6ca5430a3f9b3186efa37a7aa06a
|
fd1bfe3870bb5770e9590dd767405c3e7e272567
|
refs/heads/master
| 2023-03-12T11:44:46.495748 | 2021-03-01T09:18:09 | 2021-03-01T09:18:09 | 343,347,541 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.contrib import admin
from address.models import Address
class AddressAdmin(admin.ModelAdmin):
#화면에 출력할 필드 목록을 튜플로 지정
list_display = ("name","tel","email","address")
admin.site.register(Address, AddressAdmin)
|
UTF-8
|
Python
| false | false | 262 |
py
| 28 |
admin.py
| 18 | 0.747826 | 0.747826 | 0 | 8 | 27.75 | 51 |
csmith49/bach
| 19,310,172,996,698 |
72ceac3369c60850c964cf69ec3b34904b7bdd6d
|
1dcb9060349ac3136b39dc3bb552b9e8b39d66be
|
/tools/epee/epee.py
|
2df0eec0854761b4bced94850e78dfaf5fe41ea9
|
[] |
no_license
|
https://github.com/csmith49/bach
|
8b0b2c04a68f3a0155083d4f7997a279f5e3e4a5
|
80b0c1269de3786ab76d627c4fd85205d640ca12
|
refs/heads/master
| 2021-05-07T23:15:29.353469 | 2017-10-18T14:18:48 | 2017-10-18T14:18:48 | 107,418,191 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from argparse import ArgumentParser
from inspect import signature
from importlib import machinery
from csv import writer
import os
# store generators in a map, use decorator to tag them
# label them with add_generator anywhere after this section
_GENERATORS = {}
def add_generator(key):
def deco(f):
_GENERATORS[key] = f()
return f
return deco
def generate(*args):
return [next(_GENERATORS[arg]) for arg in args]
# machinery for loading annotated functions from a file
class Function(object):
def __init__(self, f):
self._function = f
params = signature(f).parameters
self.inputs = [params[p].annotation for p in list(params)]
self.name = self._function.__name__
def __call__(self, *args):
return self._function(*args)
def load_functions(path):
module = machinery.SourceFileLoader('sig', path).load_module()
functions = []
for name in filter(lambda s: "__" not in s, dir(module)):
functions.append(Function(getattr(module, name)))
return functions
# entry point - args is the structure provided by argparse
def main():
parser = ArgumentParser(prog="epee", description="generating CSV files for Bach")
parser.add_argument('functions')
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('-d', '--delimiter', type=str, default="\t")
parser.add_argument('-c', '--count', type=int, default=1000)
parser.add_argument('-o', '--output', default="output")
parser.add_argument('-e', '--error', default="")
args = parser.parse_args()
functions = load_functions(os.path.join(os.getcwd(), args.functions))
for f in functions:
out = "{}/{}.facts".format(args.output, f.name)
filename = os.path.join(os.getcwd(), out)
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, 'w', newline='') as csvfile:
csvwriter = writer(csvfile, delimiter=args.delimiter)
for i in range(args.count):
inputs = generate(*f.inputs)
try:
output = f(*inputs)
except Exception as e:
if args.verbose:
print(e)
output = args.error
csvwriter.writerow(inputs + [output])
# GENERATORS
# for dict
@add_generator('dict')
def dict_gen():
from random import randint
while True:
size = randint(0, 3)
d = {}
for i in range(1, size + 1):
key = randint(0, 2)
value = randint(0, 1)
d[key] = value
yield d
# for finitefield (actually does fp_199)
@add_generator('fp17')
def fp_gen():
from random import randint
while True:
yield randint(0, 198)
# for geometry
@add_generator('point')
def point_gen():
from random import randint
while True:
x = randint(-1, 1)
y = x
yield (x, y)
@add_generator('rect')
def rect_gen():
from random import randint
while True:
x = randint(-1, 1)
y = x
size = randint(1, 6)
yield (x, y, size)
@add_generator('int')
def int_gen():
from numpy import random
while True:
a = random.normal(0, 2, 1)
yield int(a[0])
@add_generator('rad')
def rad_gen():
from numpy import random
while True:
a = random.normal(0, 2, 1)
yield int(a[0])
@add_generator('posint')
def posint_gen():
from random import randint
while True:
a = randint(1, 3)
yield a
# for list
@add_generator('cons_int')
def consint_gen():
from random import randint
while True:
yield randint(0, 2)
@add_generator('list')
def list_gen():
from random import randint
while True:
size = randint(0, 3)
yield [randint(0, 2) for r in range(size)]
# for matrix
@add_generator('tensor')
def tensor_gen():
from random import randint
while True:
x1 = randint(-1, 2)
y1 = randint(-1, 1)
x2 = randint(-1, 1)
y2 = randint(-1, 1)
yield (x1, x2, y1, y2)
# for queue
@add_generator('queue')
def queue_gen():
from random import randint
while True:
size = randint(0, 3)
s = []
for i in range(1, size + 1):
value = randint(0, 1)
s.append(value)
yield s
# for sets
@add_generator('set')
def set_gen():
from random import randint
from numpy import random
while True:
size = abs(int(random.normal(0, 3, 1)[0]))
s = set([])
for i in range(1, size + 1):
value = randint(0, 2)
s.add(value)
yield s
# for strings
@add_generator('string')
def string_gen():
from random import randint
while True:
size = randint(0,4)
s = ""
for i in range(1, size + 1):
x = randint(0, 2)
if x == 0:
s += "1"
if x == 1:
s += "a"
if x == 2:
s += "A"
if s == "": s = "@"
yield s
# for trig
@add_generator('arc')
def arc_gen():
from random import randint
while True:
yield randint(-10, 10)
@add_generator('radian')
def radian_gen():
from random import randint
while True:
yield randint(-10, 10)
# if called as a standalone, execute as expected
if __name__ == "__main__":
main()
|
UTF-8
|
Python
| false | false | 5,411 |
py
| 107 |
epee.py
| 20 | 0.567548 | 0.55073 | 0 | 211 | 24.64455 | 85 |
pokuk76/HandsOnScikitLearnAndTensorflow
| 12,275,016,560,309 |
a30ea85709208cac42a6bc1e0f57b5137f71de4f
|
e1d133db78cac139e6fa9530f9431b6e81085a45
|
/Classification/fold.py
|
bd6afd95fcaa89f2a6522b7662a0db313ba9007a
|
[] |
no_license
|
https://github.com/pokuk76/HandsOnScikitLearnAndTensorflow
|
0113dc5439ce91ee5426ac91e51b842a70f2d14e
|
17b87108da2be980ec913fa1b344f9a5cab7c100
|
refs/heads/master
| 2020-12-15T08:07:00.039241 | 2019-12-05T13:40:04 | 2019-12-05T13:40:04 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import pullMnist as pm
import split_train_test as stt
def foldMnistData(X_train, y_train):
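    # Shuffle the 60,000 MNIST training samples with a single random permutation applied to both images and labels.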
shuffle_index = np.random.permutation(60000)
return X_train[shuffle_index], y_train[shuffle_index]
mnist = pm.pull_mnist()
X_train, X_test, y_train, y_test = stt.split_train_test(mnist)
X_train, y_train = foldMnistData(X_train, y_train)
|
UTF-8
|
Python
| false | false | 368 |
py
| 8 |
fold.py
| 8 | 0.711957 | 0.69837 | 0 | 11 | 31.454545 | 62 |
int-brain-lab/iblscripts
| 8,280,696,963,030 |
c426e4479613459ce98a25df744efc651919ee9c
|
3c58e1d04473e8ec8b2c812a492bb7392d4dd5a8
|
/deploy/serverpc/utils/scan_fix_passive_files.py
|
ec3cf9e21a4f7feaa4cd523a9746183a5fa7a3e6
|
[
"MIT"
] |
permissive
|
https://github.com/int-brain-lab/iblscripts
|
cf2d401ca555f20c0eea1a0d78c7e78330fb809b
|
44e828c3c3607bb326569bbaf136951dfb1fabad
|
refs/heads/master
| 2023-08-31T23:59:04.889864 | 2023-08-04T14:42:56 | 2023-08-04T14:42:56 | 187,838,029 | 3 | 9 |
MIT
| false | 2023-08-18T18:14:14 | 2019-05-21T13:00:12 | 2023-06-27T20:36:36 | 2023-08-18T18:14:14 | 11,950 | 3 | 8 | 7 |
Jupyter Notebook
| false | false |
from pathlib import Path
import argparse
import logging
from ibllib.pipes.scan_fix_passive_files import execute
log = logging.getLogger('ibllib')
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Scan and fix badly transferred passive sessions')
parser.add_argument('root_data_folder', default='/mnt/s0/Data', help='Root data folder [/mnt/s0/Data]')
parser.add_argument('--dry', required=False, default=False,
action='store_true', help='Dry run? default: False')
args = parser.parse_args() # returns data from the options specified (echo)
root_path = Path(args.root_data_folder)
if not root_path.exists():
log.error(f"{root_path} does not exist")
from_to_pairs, moved_ok = execute(root_path, dry=args.dry)
if args.dry:
log.info(from_to_pairs)
|
UTF-8
|
Python
| false | false | 842 |
py
| 162 |
scan_fix_passive_files.py
| 110 | 0.67696 | 0.674584 | 0 | 21 | 39.095238 | 107 |
MrJVM/Python-Practice
| 609,885,356,838 |
7a91052052d19dabd406fc8d5153002de1afe39b
|
a950c299e0aab8acde0540bb0efd66ffe66177a2
|
/basics/prime_practice.py
|
7a76fd92fab6d55d4dda60a6b718aab6a88e6c96
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/MrJVM/Python-Practice
|
3cb06a159e11f55179a5dcd2701f7125a9e49f94
|
2f8716b149afa18f6979329cc12036e63c515823
|
refs/heads/master
| 2021-04-24T00:26:52.442841 | 2020-04-08T17:26:32 | 2020-04-08T17:26:32 | 250,043,590 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from math import sqrt
value = 10
print(f'The value is {value}')
d = dict(tom='22',sam='50')
for k,v in d.items():
print("Key={} Value={}",k,v)
#Collection comprehension
def is_prime(x):
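    # Trial division up to sqrt(x); values below 2 are not prime.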
if x < 2:
return False
for i in range(2, int(sqrt(x))+1):
if x%i == 0:
return False
return True
l = [x for x in range(101) if is_prime(x)]
print(l)
|
UTF-8
|
Python
| false | false | 384 |
py
| 7 |
prime_practice.py
| 6 | 0.565104 | 0.53125 | 0 | 21 | 17.333333 | 43 |
vindafadilla/ta_backend
| 11,252,814,315,618 |
222dae8bef272455ca93060e6e890e58f9c7cac9
|
dea2639a56122d36624d7ab2687bff4a32c8cd90
|
/datacleaning/dataCleaning.py
|
f875fd4e8e3ecbd24d497566d4218f6a8e038daf
|
[] |
no_license
|
https://github.com/vindafadilla/ta_backend
|
139bb8e4026f07ca4a6658683e466f0ce3fe486e
|
db4b819bda668c1531d12c49ca61454dec2b57bb
|
refs/heads/master
| 2021-01-20T04:12:27.883453 | 2017-06-21T01:38:53 | 2017-06-21T01:38:53 | 89,660,284 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from ta_backend.datapreparation.dataTwitter import DataTwitter
from ta_backend.datapreparation.appropriateDataTweetFilter import AppropriateDataTweetFilter
from ta_backend.datacleaning.normalization import Normalization
class DataCleaning:
def dataCleaningProcess(self, listTweet):
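        # Normalise every tweet's text in place: unescape HTML, strip URLs, normalise whitespace, expand abbreviations and contractions, fix spelling, and split attached words.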
try:
normalization = Normalization()
print(listTweet["tweetList"][1]["text"])
listDataTweetNormalized = []
for index in range(len(listTweet["tweetList"])):
dataTweetWithoutHTMLChar = normalization.escapingHTMLChar(listTweet["tweetList"][index]["text_processed"])
listTweet["tweetList"][index]["text_processed"] = dataTweetWithoutHTMLChar
dataTwitterURLRemoved = normalization.urlRemoval(listTweet["tweetList"][index]["text_processed"])
listTweet["tweetList"][index]["text_processed"] = dataTwitterURLRemoved
dataTweetWhitespacesNormalized = normalization.whitespace(listTweet["tweetList"][index]["text_processed"])
listTweet["tweetList"][index]["text_processed"] = dataTweetWhitespacesNormalized
dataTweetAbbreviationNormalized = normalization.abbreviations(listTweet["tweetList"][index]["text_processed"])
listTweet["tweetList"][index]["text_processed"] = dataTweetAbbreviationNormalized
dataTweetContractionNormalized = normalization.contractions(listTweet["tweetList"][index]["text_processed"])
listTweet["tweetList"][index]["text_processed"] = dataTweetContractionNormalized
dataTweetSpellfixNormalized = normalization.spellfix(listTweet["tweetList"][index]["text_processed"])
listTweet["tweetList"][index]["text_processed"] = dataTweetSpellfixNormalized
dataTweetAttachedWordsSplitted = normalization.splitAttachedWords(listTweet["tweetList"][index]["text_processed"])
listTweet["tweetList"][index]["text_processed"] = dataTweetAttachedWordsSplitted
listDataTweetNormalized.append(listTweet["tweetList"][index])
tweetperDay = {
'datePeriod': listTweet["datePeriod"],
'tweetList': listDataTweetNormalized
}
return tweetperDay
except Exception as e:
return e
# def insertToDatabase(self, listTweet):
|
UTF-8
|
Python
| false | false | 2,380 |
py
| 29 |
dataCleaning.py
| 27 | 0.686134 | 0.685714 | 0 | 37 | 63.351351 | 130 |
wani-hackase/wani-writeup
| 14,379,550,507,173 |
16f266b31fda360387ad509d4036acc0ab56e6f4
|
bffb95af93d36ecff3ed7a7cd991a6a2b82b1d57
|
/2020/03-b01lers/cry-safety/solve.py
|
48bddd88f6632e100db13db3b0ad0351d9c701d0
|
[
"MIT"
] |
permissive
|
https://github.com/wani-hackase/wani-writeup
|
287159907aa2bb54e119bbb907836574871fa455
|
dd4ad0607d2f2193ad94c1ce65359294aa591681
|
refs/heads/master
| 2021-12-27T16:48:21.017138 | 2021-12-16T10:30:14 | 2021-12-16T10:30:14 | 170,531,761 | 26 | 1 |
MIT
| false | 2020-06-25T07:29:11 | 2019-02-13T15:32:43 | 2020-05-28T03:48:01 | 2020-06-25T07:27:17 | 55,790 | 17 | 0 | 0 |
HTML
| false | false |
import gmpy2
e = 0x10001
with open('flag.enc', 'rb') as f:
flag = f.read()
flag = int.from_bytes(flag, byteorder='little')
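# If m**e never wrapped the modulus, the ciphertext is exactly m**e, so its integer e-th root recovers the plaintext.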
m, exact = gmpy2.iroot(flag, e)
m = int(m)
if exact:
flag = m.to_bytes(m.bit_length()//8+1, byteorder='little')
print(flag)
else:
print('Failed')
|
UTF-8
|
Python
| false | false | 289 |
py
| 235 |
solve.py
| 86 | 0.619377 | 0.584775 | 0 | 16 | 17.0625 | 62 |
HonglinChu/SiamTrackers
| 9,603,546,880,003 |
99b4f1a19786ba49db6b026b9f543f6849c04de9
|
abbc2d332bdfa036ac12438983e6d74cf4107e64
|
/SiamFCpp/SiamFCpp-video_analyst/siamfcpp/evaluation/davis_benchmark/davis2017/results.py
|
0c39580725cf40366e2e87f9096b7f733595fb2c
|
[] |
permissive
|
https://github.com/HonglinChu/SiamTrackers
|
c494cff7543a433e8ec7dbf6d9439b1e7395b0c0
|
805208b5348346d35e64abcbe901a3829743e157
|
refs/heads/master
| 2023-08-29T06:50:59.532271 | 2023-03-06T09:13:53 | 2023-03-06T09:13:53 | 253,718,080 | 1,166 | 243 |
Apache-2.0
| false | 2023-08-03T16:39:53 | 2020-04-07T07:24:00 | 2023-08-03T02:47:52 | 2023-08-03T16:39:53 | 67,076 | 1,054 | 227 | 86 |
Python
| false | false |
import os
import numpy as np
from PIL import Image
import sys
class Results(object):
def __init__(self, root_dir):
self.root_dir = root_dir
def _read_mask(self, sequence, frame_id):
try:
mask_path = os.path.join(self.root_dir, sequence,
'{}.png'.format(frame_id))
return np.array(Image.open(mask_path))
except IOError as err:
sys.stdout.write(sequence + " frame %s not found!\n" % frame_id)
sys.stdout.write(
"The frames have to be indexed PNG files placed inside the corespondent sequence "
"folder.\nThe indexes have to match with the initial frame.\n")
sys.stderr.write("IOError: " + err.strerror + "\n")
sys.exit()
def read_masks(self, sequence, masks_id):
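        # Read the indexed masks for the given frames and split them into one boolean mask per object id.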
mask_0 = self._read_mask(sequence, masks_id[0])
masks = np.zeros((len(masks_id), *mask_0.shape))
for ii, m in enumerate(masks_id):
masks[ii, ...] = self._read_mask(sequence, m)
num_objects = int(np.max(masks))
tmp = np.ones((num_objects, *masks.shape))
tmp = tmp * np.arange(1, num_objects + 1)[:, None, None, None]
masks = (tmp == masks[None, ...]) > 0
return masks
|
UTF-8
|
Python
| false | false | 1,285 |
py
| 629 |
results.py
| 318 | 0.558755 | 0.554086 | 0 | 33 | 37.939394 | 98 |
Rapator-Tech-Academy/rapator-final-project
| 335,007,452,715 |
6c79149ac877d2d8fc101ba38d8dd241d769e539
|
06d1a334cbf7c02db43ba3890a9ef8f1d862123e
|
/app/core/admin.py
|
d7d55b2b3cb3a3f3aedede5907f9255d711cf992
|
[] |
no_license
|
https://github.com/Rapator-Tech-Academy/rapator-final-project
|
42a85b7229e7b68a806c48fb69e9a65f21f2fd62
|
63e6df63f7a16d5402a7b437da3939d93549ca8e
|
refs/heads/main
| 2023-03-19T04:21:30.527519 | 2021-03-04T09:09:28 | 2021-03-04T09:09:28 | 322,076,812 | 0 | 2 | null | false | 2021-03-04T09:09:29 | 2020-12-16T19:06:10 | 2021-02-24T00:48:14 | 2021-03-04T09:09:28 | 31,204 | 0 | 1 | 0 |
Python
| false | false |
from django.contrib import admin
from django.contrib.flatpages.admin import FlatPageAdmin
from django.contrib.flatpages.models import FlatPage
from django.db import models
from mptt.admin import MPTTModelAdmin
from ckeditor.widgets import CKEditorWidget
from core.models import Category, City, ProductImage, Product
class FlatPageCustom(FlatPageAdmin):
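    # Replace the default textarea on flat pages with a CKEditor rich-text widget.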
formfield_overrides = {
models.TextField: {'widget': CKEditorWidget}
}
admin.site.unregister(FlatPage)
admin.site.register(FlatPage, FlatPageCustom)
admin.site.register(City)
admin.site.register(Category, MPTTModelAdmin)
admin.site.register(ProductImage)
admin.site.register(Product)
|
UTF-8
|
Python
| false | false | 701 |
py
| 83 |
admin.py
| 37 | 0.810271 | 0.810271 | 0 | 27 | 24.925926 | 61 |
hugovk/EveryElection
| 1,778,116,494,175 |
f1e817b0dae2360b08971327f9c86d274decdb0c
|
d8c209163273688c9eb460674b89a10a951e4cb4
|
/every_election/apps/organisations/constants.py
|
bb4ffe68603cb5280acb1724b7483e2bfd8bf24c
|
[
"MIT"
] |
permissive
|
https://github.com/hugovk/EveryElection
|
9fa51edee42de06ab8caff225c64dfed4c61211b
|
b79123347dbfe1f7610f3b092fb5418b8e12f393
|
refs/heads/master
| 2018-04-07T15:37:19.278115 | 2017-04-25T13:08:19 | 2017-04-25T13:08:19 | 89,626,719 | 0 | 0 | null | true | 2017-04-27T18:18:44 | 2017-04-27T18:10:16 | 2017-04-27T18:10:19 | 2017-04-27T18:12:30 | 32,698 | 0 | 0 | 1 |
Python
| null | null |
PARENT_TO_CHILD_AREAS = {
'DIS': ['DIW',],
'MTD': ['MTW',],
'CTY': ['CED',],
'LBO': ['LBW',],
'CED': ['CPC',],
'UTA': ['UTW', 'UTE'],
'NIA': ['NIE',],
'COI': ['COP',],
}
CHILD_TO_PARENT_AREAS = {
'DIW': 'DIS',
'MTW': 'MTD',
'UTW': 'UTA',
'UTE': 'UTA',
'CED': 'CTY',
'LBW': 'LBO',
'CPC': 'CED',
'COP': 'COI',
}
AREAS_WITHOUT_PCCS = [
"metropolitan",
"city-of-london",
"northern-ireland",
]
AREAS_IN_WALES = [
'south-wales',
'north-wales',
'gwent',
'dyfed-powys',
]
POLICE_AREA_NAME_TO_GSS = {
'avon-and-somerset': [
'E10000027', 'E06000022', 'E06000023', 'E06000024', 'E06000025'],
'bedfordshire': ['E06000055', 'E06000056', 'E06000032'],
'cambridgeshire': ['E10000003', 'E06000031'],
'cheshire': ['E06000049', 'E06000050', 'E06000006', 'E06000007'],
'cleveland': ['E06000001', 'E06000002', 'E06000003', 'E06000004'],
'cumbria': ['E10000006'],
'derbyshire': ['E10000007', 'E06000015'],
'devon-and-cornwall': [
'E10000008', 'E06000052', 'E06000026', 'E06000027', 'E06000053'],
'dorset': ['E10000009', 'E06000028', 'E06000029'],
'durham': ['E06000005', 'E06000047'],
'dyfed-powys': ['W06000008', 'W06000010', 'W06000009', 'W06000023'],
'essex': ['E10000012', 'E06000033', 'E06000034'],
    'gloucestershire': ['E10000013'],
    'warwickshire': ['E10000031'],
'greater-manchester': [
'E08000001', 'E08000002', 'E08000003', 'E08000004', 'E08000005',
'E08000006', 'E08000007', 'E08000008', 'E08000009', 'E08000010'],
'gwent': ['W06000021', 'W06000019', 'W06000018', 'W06000022', 'W06000020'],
'hampshire': ['E10000014', 'E06000046', 'E06000044', 'E06000045'],
'hertfordshire': ['E10000015'],
'humberside': ['E06000011', 'E06000012', 'E06000013', 'E06000010'],
'kent': ['E10000016', 'E06000035'],
'lancashire': ['E10000017', 'E06000008', 'E06000009'],
'leicestershire': ['E10000018', 'E06000016', 'E06000017'],
'lincolnshire': ['E10000019'],
'merseyside': [
'E08000011', 'E08000012', 'E08000014', 'E08000015', 'E08000013'],
'norfolk': ['E10000020'],
'north-wales': ['W06000001', 'W06000002', 'W06000004',
'W06000005', 'W06000003', 'W06000006'],
'north-yorkshire': ['E10000023', 'E06000014'],
'northamptonshire': ['E10000021'],
'northumbria': ['E06000057', 'E08000037', 'E08000021',
'E08000022', 'E08000023', 'E08000024'],
'nottinghamshire': ['E10000024', 'E06000018'],
'south-wales': ['W06000015', 'W06000011', 'W06000013', 'W06000024',
'W06000012', 'W06000016', 'W06000014'],
'south-yorkshire': ['E08000016', 'E08000017', 'E08000018', 'E08000019'],
'staffordshire': ['E10000028', 'E06000021'],
'suffolk': ['E10000029'],
'surrey': ['E10000030'],
'sussex': ['E10000011', 'E10000032', 'E06000043'],
'thames-valley': ['E06000036', 'E06000038', 'E06000039', 'E06000037',
'E06000040', 'E06000041', 'E10000002', 'E10000025',
'E06000042'],
'west-mercia': ['E06000051', 'E10000034', 'E06000019', 'E06000020'],
'west-midlands': ['E08000025', 'E08000026', 'E08000027', 'E08000028',
'E08000029', 'E08000030', 'E08000031'],
'west-yorkshire': [
'E08000032', 'E08000033', 'E08000034', 'E08000035', 'E08000036'],
'wiltshire': ['E06000054', 'E06000030'],
}
COMBINED_AUTHORITY_SLUG_TO_GSS = {
'cambridgeshire-and-peterborough': ['E06000031', 'E10000003', 'E07000008',
'E07000011', 'E07000009', 'E07000012', 'E07000010'],
'greater-manchester-ca': ['E08000001', 'E08000002', 'E08000003', 'E08000004',
'E08000005', 'E08000006', 'E08000007', 'E08000008', 'E08000009',
'E08000010'],
'liverpool-city-ca': ['E06000006', 'E08000011', 'E08000012', 'E08000013',
'E08000014', 'E08000015'],
'tees-valley': ['E06000005', 'E06000001', 'E06000002', 'E06000003',
'E06000004'],
'west-midlands': ['E08000025', 'E08000026', 'E08000027', 'E08000028',
'E08000029', 'E08000030', 'E08000031'],
'west-of-england': ['E06000022', 'E06000023', 'E06000025'],
}
|
UTF-8
|
Python
| false | false | 4,209 |
py
| 5 |
constants.py
| 5 | 0.576146 | 0.235923 | 0 | 104 | 39.471154 | 81 |
peernode/offline_platform
| 7,490,422,977,018 |
86b60e420087a6b9c9e82ffdd77831c6cb059dbc
|
5c5e917a2ca20a2f3fdd6d1a01d66f3df0553a6c
|
/flashVoD/rate/connect_ratio_views.py
|
0a7671df5d1fabbdbd7972fba15156d83f9755d2
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/peernode/offline_platform
|
9d384262c4aee2c626630d5c5e3a321a351bd00c
|
3458dc3be422af3670776d35140b37b9f9637edd
|
refs/heads/master
| 2020-05-26T21:45:44.997335 | 2015-01-27T02:04:58 | 2015-01-27T02:04:58 | 29,892,543 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
import json
from django.http import HttpResponse
from rate.models import *
def update_connect_ratio(request):
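    # Parse the posted JSON and save four connect_ratio rows for the hour: overall and first-connect success and failure counts.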
result="ok"
if request.method=='POST':
decodes=json.loads(request.body)
try:
ver=version_info.get_version(decodes['ver'])
create_date='%s-%s-%s'%(decodes['create_date'][0:4], decodes['create_date'][4:6], decodes['create_date'][6:8])
# total suc info
total_obj=connect_ratio(Date=create_date,
Version=ver,
ConnectType=0,
Hour=decodes['hour'],
PostTotal=decodes['post_total'],
Fenzi=decodes['suc_total'])
total_obj.save()
# total offline info
offline_obj=connect_ratio(Date=create_date,
Version=ver,
ConnectType=2,
Hour=decodes['hour'],
PostTotal=decodes['post_total'],
Fenzi=decodes['fail_total'])
offline_obj.save()
# first suc info
first_obj=connect_ratio(Date=create_date,
Version=ver,
ConnectType=1,
Hour=decodes['hour'],
PostTotal=decodes['first_post_total'],
Fenzi=decodes['first_suc_total'])
first_obj.save()
# first offline info
first_offline_obj=connect_ratio(Date=create_date,
Version=ver,
ConnectType=3,
Hour=decodes['hour'],
PostTotal=decodes['first_post_total'],
Fenzi=decodes['first_fail_total'])
first_offline_obj.save()
except ValueError, e:
result="error: %s"%e
print e
except Exception, e:
result="error: %s"%e
print e
else:
result="error"
respStr=json.dumps({"result":result})
return HttpResponse(respStr, content_type="application/json")
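# --- Hedged example (appended; not part of the original view code) ---
# A sketch of the JSON body update_connect_ratio appears to expect, inferred
# only from the keys read out of `decodes` above; every value shown is made up.
if __name__ == '__main__':
    example_payload = json.dumps({
        'ver': '1.2.3',              # looked up via version_info.get_version
        'create_date': '20150126',   # reformatted above to '2015-01-26'
        'hour': 10,
        'post_total': 1000,
        'suc_total': 950,
        'fail_total': 50,
        'first_post_total': 400,
        'first_suc_total': 380,
        'first_fail_total': 20,
    })
    print(example_payload)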
|
UTF-8
|
Python
| false | false | 2,442 |
py
| 65 |
connect_ratio_views.py
| 41 | 0.423423 | 0.418919 | 0 | 60 | 39.7 | 122 |
Gregorh98/PlanterCostCalculator
| 6,597,069,783,886 |
0bc508fc1d5b5309cedead5f4aaf1026e189863f
|
3210361298a54c2415ac9b70dc4fcd59051a1de9
|
/planters.py
|
caa45fa9f84702a50fd273bc1beff0ce35a9f5ab
|
[] |
no_license
|
https://github.com/Gregorh98/PlanterCostCalculator
|
dc6f421157fbd59c55e59791e3d9d245bdcc6012
|
24907112b8247f27d20819e34814c64c0aa3e696
|
refs/heads/main
| 2023-03-02T19:30:30.749010 | 2021-02-13T23:35:29 | 2021-02-13T23:35:29 | 336,054,569 | 0 | 0 | null | false | 2021-02-07T01:09:50 | 2021-02-04T19:02:03 | 2021-02-07T01:09:11 | 2021-02-07T01:09:09 | 49 | 0 | 0 | 0 |
Python
| false | false |
import planks, linerRoll
import json, math
class Planter():
def __init__(self, dimensions, settings):
self.height, self.width, self.length = dimensions
self.settings = settings
self.markup = self.settings["general"]["markup"]
self.measurementType = self.settings["general"]["defaultUnitOfMeasurement"]
self.plank = planks.Plank(self.settings)
if self.length >= 90:
self.numberOfSupportPlanks = 6
else:
self.numberOfSupportPlanks = 4
def getAreaInside(self):
areaOfAirFloor = (self.width-(self.plank.height*2))*(self.length-(self.plank.height*2))
innerArea = areaOfAirFloor * self.height
return innerArea
def getAreaOutside(self):
innerArea = self.getAreaInside()
totalArea = (self.width * self.length) * self.height
outsideArea = totalArea - innerArea
return outsideArea
def calcScrewsNeeded(self):
return 50
def calcScrewCost(self):
screwsPerPlanter = self.calcScrewsNeeded()
screwsPerBox = self.settings["screws"]["numberPerBox"]
costPerBox = self.settings["screws"]["cost"]
costPerPlanter = ((costPerBox/screwsPerBox)*screwsPerPlanter)
return (costPerPlanter)
def calcWoodCostArea(self):
totalArea = self.getAreaOutside()
costPerArea = totalArea*self.plank.getCostPerCm3()
return costPerArea
def calcWoodCostPlanks(self):
woodCost = self.calcWoodNeeded() * self.plank.cost
return woodCost
def calcLinerCost(self):
#calculate areas of all box faces and ground
#Multiply the sum of these by liner cost per cm2
self.liner = linerRoll.Liner(self.settings)
longSideAreas = 2*((self.length*self.height) - (2*self.plank.height))
shortSideAreas = 2*((self.width*self.height) - (2*self.plank.height))
baseArea = ((self.length*self.width)-(4*self.plank.width))
totalAreaToCover = longSideAreas+shortSideAreas+baseArea
totalLinerCost = self.liner.getCostPerCm2()*totalAreaToCover
return totalLinerCost
def calcTotalCost(self):
total = self.calcScrewCost() + self.calcWoodCostPlanks() + self.calcLinerCost()
return total
def calcWoodNeeded(self):
planterArea = self.getAreaOutside()
plankArea = self.plank.getArea()
supportPlanks = ((self.plank.height*self.plank.width*self.height)*self.numberOfSupportPlanks)
woodNeeded = math.ceil((planterArea+supportPlanks)/plankArea)
return woodNeeded
def calcPlanksNeeded(self):
planksNeededForSections = int((2*(self.height/self.plank.width)))
lengthPlankDimensions = (str(self.length)+ "x" + str(self.plank.width))
widthPlankDimensions = (str(self.width) + "x" + str(self.plank.width))
supportPlankDimensions = (str(self.height) + "x" + str(self.plank.width))
planksNeeded = [["Length", lengthPlankDimensions, planksNeededForSections], ["Width", widthPlankDimensions, planksNeededForSections], ["Supports", supportPlankDimensions, self.numberOfSupportPlanks]]
return planksNeeded
def calcSoilNeeded(self):
innerVolume = self.getAreaInside()
soilLitresNeeded = math.ceil(innerVolume/1000)
return soilLitresNeeded
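# --- Hedged sketch (appended; not part of the original module) ---
# Planter reads a nested settings dict. Only the keys referenced in this file
# are shown below with made-up values; planks.Plank and linerRoll.Liner read
# further keys that are not visible here, so this dict is illustrative and
# incomplete rather than a working configuration.
example_settings = {
    "general": {
        "markup": 1.2,                      # assumed value
        "defaultUnitOfMeasurement": "cm",   # assumed value
    },
    "screws": {
        "numberPerBox": 200,                # assumed value
        "cost": 5.0,                        # assumed value
    },
}
# Usage sketch: Planter((40, 50, 120), example_settings)  # dimensions are (height, width, length)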
|
UTF-8
|
Python
| false | false | 3,035 |
py
| 13 |
planters.py
| 7 | 0.739703 | 0.732784 | 0 | 94 | 31.255319 | 201 |
pgmac/aws_helpers
| 9,818,295,246,684 |
5d35a462f29e03a62847aa0f129004d922ca6d28
|
ca769b501ae3b5a9cc62c203aca749579eea5abf
|
/route_53.py
|
a29d69cffb0df7c123fb288b01a9332d082c1c3a
|
[] |
no_license
|
https://github.com/pgmac/aws_helpers
|
3fb8c08cf9f9acfbb53916c193eea0532cf15777
|
ce0b7c22d2c1f78400dde758330cb2394751f19c
|
refs/heads/master
| 2022-10-19T09:31:07.212200 | 2022-09-19T07:33:50 | 2022-09-19T07:33:50 | 123,996,034 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python3
"""List the Route 53 DNS Entries"""
from pprint import pprint
import boto3
from botocore.exceptions import ClientError
def rr_display(client, rrset, level=0):
#print(rrset)
print("{}{}\t{}".format("\t"*level, rrset['Name'], rrset['Type']))
print("{}{}".format("\t"*(level+1), rrset['ResourceRecords']))
def zone_display(client, zone_det, level=0):
"""Display the zone details"""
print("{}{}".format("\t"*level, zone_det['Name']))
#print(zone_det)
response = client.list_resource_record_sets(HostedZoneId=zone_det['Id'])
zone = [rr_display(client, rrset, (level+1)) for rrset in response['ResourceRecordSets']]
return zone
def main():
    """Get the details"""
    r53 = boto3.client('route53')
    zones = []  # ensure the return value is defined even if the API call fails
    try:
        response = r53.list_hosted_zones()
        zones = [zone_display(r53, item) for item in response['HostedZones']]
        #pprint(out_put)
        #_ = (print(out_item) for out_item in out_put)
    except ClientError as c_e:
        print(c_e)
    return zones
if __name__ == '__main__':
main()
|
UTF-8
|
Python
| false | false | 1,070 |
py
| 10 |
route_53.py
| 9 | 0.621495 | 0.605607 | 0 | 35 | 29.571429 | 93 |
SEA-group/wowp_scripts
| 12,506,944,802,968 |
fae8ae7215f5149ae378762fbc38b38ad1fe1590
|
3b504a983f1807ae7c5af51078bfab8c187fc82d
|
/client/fm/FMAvatarMethods.py
|
eae6f704f9f35b890ede8902abad6704e7ef4eda
|
[] |
no_license
|
https://github.com/SEA-group/wowp_scripts
|
7d35fd213db95ea6b3dbd1ec6d3e0f13de86ba58
|
2fe54a44df34f2dcaa6860a23b835dcd8dd21402
|
refs/heads/master
| 2021-09-07T23:10:13.706605 | 2018-03-02T17:23:48 | 2018-03-02T17:23:48 | 117,280,141 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Embedded file name: scripts/client/fm/FMAvatarMethods.py
def fmEmpty(func):
return func
def fmAvatarMethods(decorator):
if decorator is None:
decorator = fmEmpty
def addFMAvatarMethods(objClass):
class decorated(objClass):
@decorator
def fmData(self, data):
pass
@decorator
def fmInit(self, ticksPerUpdate, position, rotation):
pass
@decorator
def fmSync(self, frames):
pass
@decorator
def fmDiff(self, data):
pass
@decorator
def fmBinDataBegin(self):
pass
@decorator
def fmBinData(self, data):
pass
@decorator
def fmBinDataComplete(self):
pass
decorated.__name__ = objClass.__name__
return decorated
return addFMAvatarMethods
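# --- Hedged usage sketch (appended; not part of the original module) ---
# fmAvatarMethods is a decorator factory: given a method decorator it returns a
# class decorator that adds the fm* stub methods, each wrapped by that
# decorator. The logging decorator and example class below are made up.
def _log_calls(func):
    def wrapper(*args, **kwargs):
        print('called %s' % func.__name__)
        return func(*args, **kwargs)
    return wrapper

@fmAvatarMethods(_log_calls)
class _ExampleAvatar(object):
    pass

# _ExampleAvatar().fmSync([]) would print "called fmSync".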
|
UTF-8
|
Python
| false | false | 971 |
py
| 1,504 |
FMAvatarMethods.py
| 1,016 | 0.513903 | 0.513903 | 0 | 47 | 19.680851 | 65 |
lxtxl/aws_cli
| 18,056,042,525,306 |
f319c11571d96425064427abda317d1a87ac3181
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/ec2_write_1/route-table_delete.py
|
bfb1a78ebfb2bfbaf6ac1d06ffec84059302e827
|
[] |
no_license
|
https://github.com/lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_one_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/delete-route-table.html
if __name__ == '__main__':
"""
associate-route-table : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/associate-route-table.html
create-route-table : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/create-route-table.html
describe-route-tables : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-route-tables.html
disassociate-route-table : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/disassociate-route-table.html
"""
parameter_display_string = """
# route-table-id : The ID of the route table.
"""
add_option_dict = {}
#######################################################################
# parameter display string
add_option_dict["parameter_display_string"] = parameter_display_string
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
write_one_parameter("ec2", "delete-route-table", "route-table-id", add_option_dict)
|
UTF-8
|
Python
| false | false | 1,306 |
py
| 3,596 |
route-table_delete.py
| 3,594 | 0.686064 | 0.676876 | 0 | 26 | 49.038462 | 128 |
J-Pai/408DaisyJetson
| 1,735,166,815,570 |
89ef5193ee265e22e2c4386e82a93a56dda47c42
|
12af2534582974d8f07ccde9deda54e4d8967a73
|
/tests/pure_face_tracking.py
|
e89753a08f5c6f06d3fad38c5c20bb8818c9b58d
|
[
"MIT"
] |
permissive
|
https://github.com/J-Pai/408DaisyJetson
|
853d8a1f3b0f0350128f149295a150b360bd3008
|
a873154325c790303f09ecfc03377066751cd601
|
refs/heads/master
| 2021-01-24T00:04:30.947233 | 2019-08-29T21:50:15 | 2019-08-29T21:50:15 | 122,752,585 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
import numpy as np
import cv2
import face_recognition
import sys
from multiprocessing import Queue
from multiprocessing.managers import SyncManager
from queue import Queue as ImageQueue
from pylibfreenect2 import Freenect2, SyncMultiFrameListener
from pylibfreenect2 import FrameType, Registration, Frame
from pylibfreenect2 import setGlobalLogger
setGlobalLogger(None)
print("OpenGL Pipeline")
from pylibfreenect2 import OpenGLPacketPipeline
print("Starting Tracking")
def __draw_bbox(valid, frame, bbox, color, text):
if not valid:
return
cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2, 1)
cv2.putText(frame, text, (bbox[0], bbox[1] - 4), \
cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)
def __scale_frame(frame, scale_factor = 1):
if scale_factor == 1:
return frame
return cv2.resize(frame, (0,0), fx=scale_factor, fy=scale_factor)
def face_locations(image):
pass
class NeuronManager(SyncManager):
pass
NeuronManager.register('get_web_neuron')
NeuronManager.register('get_alexa_neuron')
manager = NeuronManager(address=('', 4081), authkey=b'daisy')
manager.connect()
web_neuron = manager.get_web_neuron()
alexa_neuron = manager.get_alexa_neuron()
faces = {
"JessePai": "../faces/JPai-1.jpg",
# "VladMok": "./faces/Vlad.jpg",
# "TeddyMen": "./faces/TMen-1.jpg"
}
known_faces = {}
for person in faces:
image = face_recognition.load_image_file(faces[person])
print(person)
face_encoding_list = face_recognition.face_encodings(image)
if len(face_encoding_list) > 0:
known_faces[person] = face_encoding_list[0]
else:
print("\tCould not find face for person...")
pipeline = OpenGLPacketPipeline()
target = "JessePai"
fn = Freenect2()
num_devices = fn.enumerateDevices()
if num_devices == 0:
print("No device connected!")
serial = fn.getDeviceSerialNumber(0)
device = fn.openDevice(serial, pipeline = pipeline)
listener = SyncMultiFrameListener(FrameType.Color | FrameType.Depth)
device.setColorFrameListener(listener)
device.setIrAndDepthFrameListener(listener)
device.start()
registration = Registration(device.getIrCameraParams(),
device.getColorCameraParams())
undistorted = Frame(512, 424, 4)
registered = Frame(512, 424, 4)
bigdepth = Frame(1920, 1082, 4)
trackerObj = None
face_process_frame = True
bbox = None
track_bbox = None
while True:
timer = cv2.getTickCount()
frames = listener.waitForNewFrame()
color = frames["color"]
depth = frames["depth"]
registration.apply(color, depth, undistorted, registered, bigdepth=bigdepth)
bd = np.resize(bigdepth.asarray(np.float32), (1080, 1920))
c = cv2.cvtColor(color.asarray(), cv2.COLOR_RGB2BGR)
face_bbox = None
new_track_bbox = None
face_locations = face_recognition.face_locations(c, number_of_times_to_upsample=0, model="cnn")
face_encodings = face_recognition.face_encodings(c, face_locations)
for face_encoding in face_encodings:
matches = face_recognition.compare_faces(
[known_faces[target]], face_encoding, 0.6)
if len(matches) > 0 and matches[0]:
(top, right, bottom, left) = face_locations[0]
face_bbox = (left, top, right, bottom)
mid_w = int((left + right) / 2)
mid_h = int((top + bottom) / 2)
break
__draw_bbox(face_bbox is not None, c, face_bbox, (0, 0, 255), target)
c = __scale_frame(c, scale_factor = 0.5)
fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
cv2.putText(c, "FPS : " + str(int(fps)), (100,50),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,255), 1)
image = cv2.imencode('.jpg', c)[1].tostring()
web_neuron.update([('image', image)])
listener.release(frames)
    # self.so.close()  # disabled: 'self' is not defined at module scope, so this line raised a NameError
cv2.destroyAllWindows()
device.stop()
device.close()
|
UTF-8
|
Python
| false | false | 3,881 |
py
| 12 |
pure_face_tracking.py
| 10 | 0.679979 | 0.651121 | 0 | 140 | 26.707143 | 99 |
hglasser/phonebook-app
| 5,076,651,379,539 |
ea43b518c046b404e0da002ff7e1cf92b02fb659
|
cc3bb2bf5dd9d20c3b7976a803313bd723d46ccd
|
/set_an_entry.py
|
0f84d7707926c8971578953026ac7c1456175214
|
[] |
no_license
|
https://github.com/hglasser/phonebook-app
|
e7bab09b34e7cdffcd39d7abc13d847f84feaf8d
|
a49c8a03140de735fac84fb75c6653f17d8e8405
|
refs/heads/master
| 2020-03-22T05:57:24.027367 | 2018-07-09T13:24:30 | 2018-07-09T14:27:08 | 139,602,203 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def set_an_entry(phonebook):
name = raw_input("Name: ")
phone_number = raw_input("Enter phone number formatted like this ###-###-####: ")
phonebook[name] = phone_number
return "Entry stored for %s" % name
|
UTF-8
|
Python
| false | false | 220 |
py
| 8 |
set_an_entry.py
| 7 | 0.631818 | 0.631818 | 0 | 5 | 43.2 | 85 |
ProkopHapala/SimpleSimulationEngine
| 180,388,669,678 |
ae192ac14dcb97fbb19fc7959b83ae087b52a8b4
|
07e6fc323f657d1fbfc24f861a278ab57338b80a
|
/python/pySimE/space/exp/pykep/lambert_Prokop.py
|
a9901e407553f656542150e379af6b6744fa6025
|
[
"MIT"
] |
permissive
|
https://github.com/ProkopHapala/SimpleSimulationEngine
|
99cf2532501698ee8a03b2e40d1e4bedd9a12609
|
47543f24f106419697e82771289172d7773c7810
|
refs/heads/master
| 2022-09-05T01:02:42.820199 | 2022-08-28T10:22:41 | 2022-08-28T10:22:41 | 40,007,027 | 35 | 4 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import numpy as np
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from PyKEP import epoch, DAY2SEC, planet_ss, AU, MU_SUN, lambert_problem
from PyKEP.orbit_plots import plot_planet, plot_lambert
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
ax = fig.gca(projection='3d')
t1 = epoch(0)
t2 = epoch(740)
dt = (t2.mjd2000 - t1.mjd2000) * DAY2SEC
ax.scatter(0,0,0, color='y')
pl = planet_ss('earth')
plot_planet(ax,pl, t0=t1, color=(0.8,0.8,1), legend=True, units = AU)
rE,vE = pl.eph(t1)
pl = planet_ss('mars')
plot_planet(ax,pl, t0=t2, color=(0.8,0.8,1), legend=True, units = AU)
rM, vM = pl.eph(t2)
l = lambert_problem(rE,rM,dt,MU_SUN)
nmax = l.get_Nmax()
print "max number of revolutions",nmax
plot_lambert(ax,l , color=(1,0,0), legend=True, units = AU)
for i in range(1,nmax*2+1):
print i
plot_lambert(ax,l,sol=i, color=(1,0,i/float(nmax*2)), legend=True, units = AU)
def axisEqual3D(ax):
extents = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:,1] - extents[:,0]
centers = np.mean(extents, axis=1)
maxsize = max(abs(sz))
r = maxsize/2
for ctr, dim in zip(centers, 'xyz'):
getattr(ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
axisEqual3D(ax)
plt.show()
|
UTF-8
|
Python
| false | false | 1,376 |
py
| 1,233 |
lambert_Prokop.py
| 875 | 0.621366 | 0.579942 | 0 | 55 | 22.945455 | 81 |
Muele93/Ndivhu
| 16,338,055,628,387 |
6851cdc6e99a5c8cd429a19561186adeddb42896
|
0924cd1386c59dc746196b9a01a8dd817c9fc9cd
|
/Ndifunc/recursion.py
|
02d03a179305c11d2e7e1c28e5593d57901f292c
|
[] |
no_license
|
https://github.com/Muele93/Ndivhu
|
2ef79771d6d3328bdb282dd10d8f2802754a3619
|
80c18b2c4f58bc25c59360eeeb7bfa6a62d44c47
|
refs/heads/master
| 2020-04-30T23:25:09.460560 | 2019-07-02T13:00:59 | 2019-07-02T13:00:59 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def sum_array(array):
"""funtion returns the sum of all items in array"""
x= len(array)
if x == 1:
return array[0]
else:
return array[0] + sum_array(array[1:],)
def fibonacci(n):
""""Returns the nth term in a fibonacci sequence when n==0 and n==1"""
if n==0 or n==1:
return n
else:
#When n>1, the nth term of a fibonacci sequence is returned by adding the 2 previous terms
return fibonacci(n-1) + fibonacci(n-2)
def factorial(n):
"""Returns the factorial of n when it is greater than 0"""
if n==0:
return 1
#The factorial of 0 is always 1
else:
return n* factorial(n-1)
def reverse(word):
"""The function returns a string in reverse"""
reversed_string = ''
index = len(word)
while index:
index =index- 1
reversed_string = reversed_string + word[index]
return reversed_string
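# --- Usage examples (appended; not part of the original module) ---
# Quick sanity checks for the recursive helpers defined above.
if __name__ == '__main__':
    print(sum_array([1, 2, 3, 4]))   # 10
    print(fibonacci(7))              # 13
    print(factorial(5))              # 120
    print(reverse("recursion"))      # "noisrucer"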
|
UTF-8
|
Python
| false | false | 918 |
py
| 6 |
recursion.py
| 3 | 0.600218 | 0.579521 | 0 | 36 | 24.5 | 98 |
fbartusch/travellingSalesperson
| 2,216,203,154,597 |
5855d33f13431a9b0fdfb5ffa75d9c852d7196e3
|
12151fd50e483a8a68594eb795f7bc83d4e0a3ca
|
/tests/test_tspsolver.py
|
db92de8ea86a2b4f48d1e5ccb9eba91b0b264212
|
[
"MIT"
] |
permissive
|
https://github.com/fbartusch/travellingSalesperson
|
ece0b2abdedf777125aaa959a3f9ebb13eee25c7
|
94d8245a08b5c17612d00d8e4704bb08f595fc1c
|
refs/heads/master
| 2022-11-13T23:07:52.911235 | 2020-07-02T20:01:33 | 2020-07-02T20:01:33 | 275,645,328 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from unittest import TestCase
import numpy as np
from tsp.tspsolver import NaiveTSPSolver
from tsp.tspsolver import NearestNeighbor
from tsp.tspsolver import Christofides
class TestTSPSolver(TestCase):
def __init__(self, *args):
super().__init__(*args)
# From: https://people.sc.fsu.edu/~jburkardt/datasets/tsp/tsp.html
self.d = np.array([[0.0, 3.0, 4.0, 2.0, 7.0],
[3.0, 0.0, 4.0, 6.0, 3.0],
[4.0, 4.0, 0.0, 5.0, 8.0],
[2.0, 6.0, 5.0, 0.0, 6.0],
[7.0, 3.0, 8.0, 6.0, 0.0]])
def test_naive(self):
solver = NaiveTSPSolver(self.d)
solver.solve()
self.assertEqual(solver.dist, 19)
self.assertEqual(solver.solution, [0, 2, 1, 4, 3])
def test_nearest_neighbor(self):
solver = NearestNeighbor(self.d)
solver.solve()
self.assertEqual(solver.dist, 21)
self.assertEqual(solver.solution, [0, 3, 2, 1, 4])
def test_christofides(self):
solver = Christofides(self.d)
solver.solve()
self.assertEqual(solver.dist, 21)
self.assertEqual(solver.solution, [0, 2, 3, 4, 1])
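# --- Hedged illustration (appended; not part of the original tests) ---
# The tsp package under test is not included here. The helper below is a
# generic nearest-neighbour heuristic, written only to illustrate the tour the
# NearestNeighbor test above expects: start at city 0, repeatedly hop to the
# closest unvisited city, then return to the start.
def _nearest_neighbour_tour(dist):
    n = len(dist)
    tour, visited = [0], {0}
    total = 0.0
    while len(tour) < n:
        cur = tour[-1]
        nxt = min((c for c in range(n) if c not in visited), key=lambda c: dist[cur][c])
        total += dist[cur][nxt]
        tour.append(nxt)
        visited.add(nxt)
    total += dist[tour[-1]][0]  # close the tour back at the start city
    return tour, total
# With the 5x5 matrix above this yields ([0, 3, 2, 1, 4], 21.0), matching
# test_nearest_neighbor.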
|
UTF-8
|
Python
| false | false | 1,185 |
py
| 6 |
test_tspsolver.py
| 5 | 0.564557 | 0.504641 | 0 | 36 | 31.916667 | 74 |
gistable/gistable
| 17,411,797,444,224 |
9ec3489c240813c984ad94fea4c0c5a8a07e8818
|
255e19ddc1bcde0d3d4fe70e01cec9bb724979c9
|
/dockerized-gists/55c132a1941254e9ba899cbb28db8327/snippet.py
|
61ec914bf1f4e288d229a009547a5cf7088e9510
|
[
"MIT"
] |
permissive
|
https://github.com/gistable/gistable
|
26c1e909928ec463026811f69b61619b62f14721
|
665d39a2bd82543d5196555f0801ef8fd4a3ee48
|
refs/heads/master
| 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | false | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | 2020-07-19T04:11:52 | 2018-03-19T15:12:34 | 12,234 | 64 | 16 | 1 |
Python
| false | false |
"""Python switch-statement pseudo-implementation
Mimics C-style switch statements
The following code blocks should be equivalent
-----------------------------------------------
switch(arg):
case 1:
// Handle case
case 2:
// Handle case
default:
// Handle default case
-----------------------------------------------
class Case1(SwitchCall):
def __init__(self, arg1, arg2):
self.__arg1 = arg1
self.__arg2 = arg2
def do_call(self, *args, **kwargs)
# Handle call
return self.__arg1 - self.__arg2
class Case2(SwitchCall):
def __init__(self, arg1, arg2):
self.__arg1 = arg1
self.__arg2 = arg2
def do_call(self, *args, **kwargs)
# Handle call
return self.__arg1 * self.__arg2
class CaseDefault(SwitchCall):
def __init__(self, arg1, arg2):
self.__arg1 = arg1
self.__arg2 = arg2
def do_call(self, *args, **kwargs)
return self.__arg1 + self.__arg2
switch(arg, {
case_1 : Case1(arg1, arg2),
case_2 : Case2(arg1, arg2)
}, CaseDefault(arg1, arg2))
"""
__author__ = 'Thomas Li Fredriksen'
__license__ = 'MIT'
class SwitchCall(object):
"""Switch-call master class
All switch-call objects must inherit from this class
"""
def do_call(self, *args, **kwargs):
"""Overload this function to simulate function call"""
pass
    def __call__(self, *args, **kwargs):
        """Call the do_call-method, forwarding arguments unchanged"""
        return self.do_call(*args, **kwargs)
class switch(object):
def __init__(self, key, cases, default=None):
"""Switch-statement implementation
:param key: Switch parameter
:param cases: Dictionary of callbacks
:param default: Default callback if key is not in cases
"""
ret = None
try:
ret = cases[key]()
except KeyError:
if default:
ret = default()
finally:
return ret
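# --- Hedged usage example (appended; not part of the original gist) ---
# Plain callables work as cases too, since switch() only needs something it can
# call. The callbacks below return None, which also sidesteps the fact that
# switch.__init__ is (ab)used as the dispatcher and must not return a value.
if __name__ == '__main__':
    def _case_add():
        print("add selected")

    def _case_mul():
        print("mul selected")

    def _case_default():
        print("default selected")

    switch('mul', {'add': _case_add, 'mul': _case_mul}, _case_default)  # prints "mul selected"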
|
UTF-8
|
Python
| false | false | 1,959 |
py
| 7,080 |
snippet.py
| 7,021 | 0.54926 | 0.529862 | 0 | 65 | 29.153846 | 63 |
sleao/ex_fastapi
| 14,508,399,575,477 |
f8d9a9965890bd2369975090861840129791648c
|
3658eeaaa4906d44e11ad894af61534513dabb64
|
/app/api/api_v1/endpoints/example.py
|
bbcaa03e383e22d2be4852b44cfe0ebb32f71c43
|
[] |
no_license
|
https://github.com/sleao/ex_fastapi
|
8a95c043e5b3c77081256d28449f93a9b43f7a05
|
71459182eeccb4f0cf06cd881ce48a3083fbc9b4
|
refs/heads/master
| 2023-05-31T13:00:37.161891 | 2020-07-24T13:11:50 | 2020-07-24T13:11:50 | 282,109,949 | 1 | 0 | null | false | 2021-06-11T18:14:33 | 2020-07-24T03:08:01 | 2021-06-02T13:40:26 | 2021-06-11T18:14:32 | 7 | 1 | 0 | 4 |
Python
| false | false |
from fastapi import APIRouter
from app.schemas import echo, hello
router = APIRouter()
@router.get('/hello', response_model=hello.Hello)
def hello_world():
return {'msg': 'Hello World'}
@router.post('/echo', response_model=echo.Echo)
def echo(msg_in: echo.Echo):
    return {'msg': msg_in.msg}
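# --- Hedged sketch (appended; not part of the original file) ---
# app.schemas.hello / app.schemas.echo are not shown in this repository dump.
# Given response_model=hello.Hello and the {'msg': ...} payloads above, each
# schema is presumably a small pydantic model along these lines; the field name
# is inferred from the handlers and everything else is an assumption.
from pydantic import BaseModel

class HelloSketch(BaseModel):
    msg: str

class EchoSketch(BaseModel):
    msg: str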
|
UTF-8
|
Python
| false | false | 295 |
py
| 7 |
example.py
| 5 | 0.701695 | 0.701695 | 0 | 13 | 21.769231 | 49 |
dragos-vacariu/Python-Exercises
| 9,964,324,165,603 |
4692baa04d679ea559dd9cb93e41b121657c7499
|
9b262e9b291902abd7fa567a2edf50799c7952ac
|
/Basic Practice/program18 distance between circle point.py
|
b3513f720ef75b718ed5542e2f60e237e6ed8255
|
[] |
no_license
|
https://github.com/dragos-vacariu/Python-Exercises
|
983ad571879cb24f9ae29f67d57adb94356bf1b1
|
1578cfb973409f40aa658fe59d967861eac6b070
|
refs/heads/master
| 2021-06-13T17:37:52.624589 | 2021-04-22T16:18:22 | 2021-04-22T16:18:22 | 172,189,766 | 0 | 2 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#class PUNCT with 2 parameters: x, y
#class CERC inherits from PUNCT and defines a radius
#CERC has a method for computing its area
class PUNCT:
#constructor
def __init__(self, xCoord, yCoord):
self.x=xCoord #non-static field
self.y=yCoord #non-static field
#public method
def calculateDistance(self, Punct):
distanceX = Punct.x - self.x
distanceY = Punct.y - self.y
distance = (distanceX**2) + (distanceY**2)
return distance ** 0.5
#operator overloading
def __add__(self, other): #overloading the "+" operator for addition
if(isinstance(other, POLYLINIE)):
points = [self]
for x in other.Points:
points.append(x)
return POLYLINIE(*points)
else:
polilinie = POLYLINIE(self, other)
return polilinie
#built-in function overloading
def __str__(self):
return "Punct(" +str(self.x) + ", " +str(self.y) +")"
class CERC(PUNCT): #CERC inherits from PUNCT
#constructor
def __init__(self, Punct, Rad):
super().__init__(Punct.x, Punct.y) #super() returns the parent class
self.Radius = float(Rad)
#public methods
def getArea(self):
return 3.14 * self.Radius**2
def calculateDistanceToObject(self, Data):
if isinstance(Data, CERC):
return super().calculateDistance(Data) - self.Radius - Data.Radius
elif isinstance(Data, PUNCT):
return super().calculateDistance(Data) - self.Radius
class SEGMENT(PUNCT):
def calculateDistance(self, Punct1, Punct2):
distanceX = Punct2.x - Punct1.x
distanceY = Punct2.y - Punct1.y
distance = (distanceX**2) + (distanceY**2)
return distance ** 0.5
#constructor
def __init__(self, Punct1, Punct2):
super().__init__(Punct1.x, Punct1.y)
super().__init__(Punct2.x, Punct2.y)
self.length = self.calculateDistance(Punct1,Punct2)
#build-in function overloading
def __str__(self): #redefining the function __str__ so it can be printed directly print(obj)
return "Length of segment: " + str(self.length)
#Operator overloading
    def __gt__(self, other): #overloading the > operator (gt comes from greater than)
        return self.length > other.length
    #also works for <, because Python falls back to the reflected operator
    def __ge__(self, other): #overloading the >= operator (ge comes from greater than or equal to)
        return self.length >= other.length
    #also works for <=, because Python falls back to the reflected operator
def __eq__(self, other): #overloading == operator
return self.length == other.length
class POLYLINIE:
#constructor
def __init__(self, *Points):
self.Points=list(Points)
#built-in function overloading
def __str__(self):
value="Polilinie("
for p in self.Points:
value+=str(p)+", "
value+=")"
return value
#operator overloading
def __add__(self, other): #overloading the "+" operator for addition
puncte = []
for x in self.Points:
puncte.append(x)
puncte.append(other)
return POLYLINIE(*puncte)
def __iter__(self): #making class iterable
self.counter=0
def __next__(self): #making clas iterable
if self.counter < len(self.Points):
self.counter+=1
else:
raise Exception ("Object reached boundaries")
x = 3; y = 4
#x = float(input("p1.x: "))
#y = float(input("p1.y: "))
punctUnu = PUNCT(x,y)
cerc = CERC(punctUnu, Rad = 2.0)
x = 5; y = 6
#x = float(input("p2.x: "))
#y = float(input("p2.y: "))
punctDoi = PUNCT(x,y)
cerc2 = CERC(punctDoi, Rad = 3.0)
print("\nArea of circle = " + str(cerc.getArea()))
print("Distance PointToPoint = " + str(punctUnu.calculateDistance(punctDoi)))
print("Distance CircleToPoint = " + str(cerc.calculateDistanceToObject(punctDoi)))
print("Distance CircleToCircle = " + str(cerc.calculateDistanceToObject(cerc2)))
segment = SEGMENT(punctUnu, punctDoi)
print(segment)
segmentSec = SEGMENT(punctDoi, PUNCT(10,7))
print() #put a new line
print(segmentSec)
print(segment < segmentSec)
print(segment >= segmentSec)
print(segment == segmentSec)
print() #put a new line
polyline = POLYLINIE(punctUnu, punctDoi)
#print(polyline)
print("Result: ")
polylineDoi = punctUnu + (punctDoi + punctUnu) # building a polyline from 3 points
print(polylineDoi)
|
UTF-8
|
Python
| false | false | 4,421 |
py
| 72 |
program18 distance between circle point.py
| 64 | 0.62271 | 0.612079 | 0 | 148 | 28.871622 | 96 |
Jirapongs55/CP3-Jirapong-Sirichottanawong
| 12,515,534,751,432 |
0946c9e920fcd155dce100e652265b623c7e465c
|
a0bee93fa987d2ac5f7f8e4f830b5e8965326f52
|
/Exercise11_Jirapong_S.py
|
6cb8717ac8746253ed98af6801794a785b9abdc7
|
[] |
no_license
|
https://github.com/Jirapongs55/CP3-Jirapong-Sirichottanawong
|
563bd855c31d77deb8b789a77b4cd5fccba094ca
|
6ddbba0b4724cfe5fce7414d6850a440acf58d61
|
refs/heads/main
| 2023-07-09T03:45:28.207272 | 2021-08-09T15:06:50 | 2021-08-09T15:06:50 | 384,119,133 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
inputNumber = int(input("Input the pyramid level : "))
blankText = " "
text = input("Input the character for pyramid monomer : ")
for i in range(inputNumber):
print(blankText*(inputNumber-(i+1)),end=text*((2*i)+1))
print(blankText*(inputNumber-(i+1)))
print("-"*12,"The End","-"*12)
'''
# Pattern
print(" "*2,end="*")
print(" "*2)
print(" "*1,end="***")
print(" "*1)
print(" "*0,end="*****")
print(" "*0)
'''
|
UTF-8
|
Python
| false | false | 471 |
py
| 16 |
Exercise11_Jirapong_S.py
| 14 | 0.518047 | 0.488323 | 0 | 17 | 23.588235 | 59 |
LillyParker/Python_ExercisesNMNH
| 9,612,136,855,826 |
01a8c08695a24f51467fcdcff90270287993eabe
|
6faa1e9b3e5a149a0081b9387407bdb30ce72ef2
|
/Week2Hmwk_calculator.py
|
40482ac3b25d2075cea53e2d36c128090ce53f9e
|
[] |
no_license
|
https://github.com/LillyParker/Python_ExercisesNMNH
|
648d8647c63bb9f3769db40fbee0f03419a50c62
|
41a6d21e188a45f3f8ec7b98546de4ee38bb7008
|
refs/heads/master
| 2021-01-19T03:35:04.862394 | 2016-07-31T22:00:46 | 2016-07-31T22:00:46 | 63,359,777 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Homework for Week2 of the Python course at NMNH
# Assignment: Make a program that will perform addition, subtraction, division or multiplication of two numbers. Use appropriate error messages.
print("Welcome to the calculator!","\nPlease enter two numbers, followed by the operator to use.")
try:
    response1 = input("Enter the first number:")
    num1=float(response1)
    response2 = input("Enter the second number:")
    num2=float(response2)
except ValueError:
    print("Please enter a valid number")
    raise SystemExit
print("Addition=1","\nSubtraction=2","\nDivision=3","\nMultiplication=4")
while True:
response3 = input("Please enter the number for the operator you want, when finished, enter done:")
operator=str(response3)
if operator=="1":
print("The sum of ",num1,"and ",num2,"equals: ",num1+num2)
elif operator=="2":
print("The difference between ",num1,"and ",num2,"equals: ",num1-num2)
elif operator=="3":
print("The quotient of ",num1,"and ",num2,"equals: ",num1/num2)
elif operator=="4":
print("The product of ",num1,"and ",num2,"equal: ",num1*num2)
elif operator=="done":
break
else:
print("Please enter a valid operator")
print("Thanks for using the calculator!")
|
UTF-8
|
Python
| false | false | 1,169 |
py
| 6 |
Week2Hmwk_calculator.py
| 4 | 0.714286 | 0.686056 | 0 | 32 | 35.375 | 144 |
aegiacometti/netconf-backup
| 7,086,696,075,229 |
cf25934a294747155a97b9b8614d5f493c747282
|
86cf7b195564849ea73a405f32eaa732495d367a
|
/scripts/netconf-backup-f5.py
|
a6160a18d4041b2b493bff533ee5a0c61bebb207
|
[
"Apache-2.0"
] |
permissive
|
https://github.com/aegiacometti/netconf-backup
|
10a26ba403dcaca3b9a434c56016df44f5913e2f
|
f0a890f8ba614a3fe33e6408665dac60fcbbab4c
|
refs/heads/master
| 2021-03-16T15:13:42.035759 | 2020-10-22T13:21:20 | 2020-10-22T13:21:20 | 246,919,449 | 1 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import sys
import time
import glob
import os
device_hostname = sys.argv[1]
historic_files_to_keep = sys.argv[2]
def remove_old_files():
list_files = glob.glob('./backups/{}*'.format(device_hostname))
if len(list_files) >= int(historic_files_to_keep):
list_files.sort()
for index in range(int(historic_files_to_keep)-1, len(list_files)-1):
os.remove(list_files[index])
def rename_current_file():
get_date = time.localtime()
current_date = time.strftime('%Y-%m-%d-%H-%M-%S', get_date)
filename = './backups/{}-{}.ucs'.format(device_hostname, current_date)
os.rename('./backups/backup.ucs', filename)
if __name__ == '__main__':
rename_current_file()
remove_old_files()
|
UTF-8
|
Python
| false | false | 736 |
py
| 11 |
netconf-backup-f5.py
| 3 | 0.634511 | 0.629076 | 0 | 30 | 23.533333 | 77 |
BrenoBaiardi/GSC
| 10,943,576,675,657 |
602a7541fc08fde5a9fb7a81b5cf6dc85934de37
|
937c24b2d1df5731ed76d0254d7a8de495bfdd34
|
/test_GSC.py
|
c90369b9c32e6704c9c06a3a24cd80dafeafd100
|
[] |
no_license
|
https://github.com/BrenoBaiardi/GSC
|
b3bdab288ecf4b88f5ce9ff22652d6d9a16bb62c
|
cd713e0925eebcd78242e319a17dd01d031667ce
|
refs/heads/master
| 2020-07-01T23:03:35.391405 | 2019-08-08T21:05:55 | 2019-08-08T21:05:55 | 201,334,224 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# The Gotham City postal system had a problem and lost its CEP (postal code) validator. Today, your mission is to create a CEP validator based on a few small rules listed below:
#
# 1. The CEP is a number greater than 100,000 and less than 999,999
# 2. The CEP cannot contain any digit repeated in an alternating pair
#
#
# 121426 # Here, 1 is a digit repeated in an alternating pair.
# 523563 # Here no digit alternates.
# 552523 # Here the digits 2 and 5 are repeated alternating digit pairs.
# 112233 # Here no digit is repeated in an alternating pair.
from unittest import TestCase
from GSC import GSC
class TestGSC(TestCase):
def test_verificar_cep_dentro_de_range_aceito(self):
CEP = str(112233)
self.assertEqual(True, GSC.validar_cep(CEP))
def test_verificar_cep_abaixo_de_range_aceito(self):
CEP = "002233"
with self.assertRaises(ValueError):
GSC.validar_cep(CEP)
def test_verificar_cep_acima_de_range_aceito(self):
CEP = str(1999999)
with self.assertRaises(ValueError):
GSC.validar_cep(CEP)
def test_verificar_cep_nao_repetido_alternado(self):
CEP = str(112233)
self.assertEqual(True, GSC.validar_cep(CEP))
def test_verificar_cep_nao_alternado(self):
CEP = str(523563)
self.assertEqual(True, GSC.validar_cep(CEP))
def test_verificar_todos_numeros_repetidos(self):
CEP = str(999999)
self.assertEqual(False, GSC.validar_cep(CEP))
def test_verificar_1_digito_repetido_alternado_par_no_comeco(self):
CEP = str(121426)
self.assertEqual(False, GSC.validar_cep(CEP))
def test_verificar_1_digito_repetido_alternado_par_no_meio(self):
CEP = str(123256)
self.assertEqual(False, GSC.validar_cep(CEP))
def test_verificar_1_digito_repetido_alternado_par_no_fim(self):
CEP = str(163252)
self.assertEqual(False, GSC.validar_cep(CEP))
def test_verificar_2_digitos_repetidos_alternados_pares(self):
CEP = str(552523)
self.assertEqual(False, GSC.validar_cep(CEP))
def test_verificar_entrada_de_CEP_por_texto(self):
CEP = str(112233)
self.assertEqual(True, GSC.validar_cep(CEP))
def test_verificar_entrada_de_CEP_por_numero(self):
CEP = 112233
self.assertEqual(True, GSC.validar_cep(CEP))
def test_verificar_entrada_de_CEP_texto_formatado(self):
try:
CEP = '1-1-2-2-3-3'
GSC.validar_cep(CEP)
CEP = '1.1.2.2.3.3'
GSC.validar_cep(CEP)
CEP = '1 1 2 2 3 3'
GSC.validar_cep(CEP)
CEP = '11 22 33'
GSC.validar_cep(CEP)
except:
self.fail()
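# --- Hedged sketch (appended; not part of the original repository) ---
# GSC.py itself is not included in this dump. The class below is one possible
# validar_cep that satisfies the rules in the header comment and the tests
# above: range-check the six-digit CEP, then reject any digit that repeats two
# positions later (a "repeated alternating digit pair").
class GSCSketch:
    @staticmethod
    def validar_cep(cep):
        digits = ''.join(ch for ch in str(cep) if ch.isdigit())
        value = int(digits)
        if not 100000 < value <= 999999:
            raise ValueError("CEP out of the allowed range")
        return all(digits[i] != digits[i + 2] for i in range(len(digits) - 2))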
|
UTF-8
|
Python
| false | false | 2,764 |
py
| 3 |
test_GSC.py
| 3 | 0.650909 | 0.599273 | 0 | 78 | 34.25641 | 192 |
vaibhavkollipara/URLShortener
| 9,423,158,290,217 |
407b664999c2082c04f01709a0263b9e5fc0d6c2
|
626b4425852f5de011008b09bfda787133fd3e95
|
/shortener/models.py
|
5a2bae4e50825f00b57dcd1f69fb0c632ee4a95e
|
[] |
no_license
|
https://github.com/vaibhavkollipara/URLShortener
|
ea789ac9a13354957fbe163a71a59acc52624bf9
|
ac5ed774e02f962177fb7b1ee1a93f9869e83c7c
|
refs/heads/master
| 2021-01-18T16:39:10.677863 | 2017-04-06T21:26:08 | 2017-04-06T21:26:08 | 86,754,016 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.db import models
from .utils import create_shortcode
from django.conf import settings
SHORTCODE_MAX = getattr(settings, "SHORTCODE_MAX",15)
class ShortURLManager(models.Manager):
def all(self,*args,**kwargs):
qs_main = super(ShortURLManager,self).all(*args,**kwargs)
qs = qs_main.filter(active=True)
return qs
def refresh_shorturls(self):
qs = ShortURL.objects.filter(id__gte=1)
new_codes = 0
for q in qs:
q.shorturl = None
q.save()
new_codes += 1
return "refreshed {} urls".format(new_codes)
class ShortURL(models.Model):
url = models.CharField(max_length=250)
shorturl = models.CharField(max_length=SHORTCODE_MAX,
unique=True,
blank=True
)
updated = models.DateTimeField(auto_now=True)
timestamp = models.DateTimeField(auto_now_add=True)
active = models.BooleanField(default=True)
objects = ShortURLManager()
def save(self,*args,**kwargs):
if self.shorturl is None or self.shorturl=='':
self.shorturl = create_shortcode(self)
super(ShortURL,self).save(*args,**kwargs)
def __str__(self):
return str(self.url)
def __unicode__(self):
return str(self.url)
    def get_shorturl(self):
        return self.shorturl
|
UTF-8
|
Python
| false | false | 1,396 |
py
| 11 |
models.py
| 8 | 0.594556 | 0.588825 | 0 | 49 | 27.510204 | 65 |
wenshixiaoying/chapter7
| 3,908,420,283,988 |
5ba046d70ebc4dda8fe32b0919ab56b7c51d5ca1
|
b315b0ee1ba44402c03035ca90c78e0cc3f1d3f1
|
/modules/environment.py
|
bb4823618ef984522e3215b0a926df5970033e2a
|
[] |
no_license
|
https://github.com/wenshixiaoying/chapter7
|
c46717662635a89c0b0323141619eba540aaae7f
|
82398e59eff845558e56dbacc4e3f92b00810c19
|
refs/heads/master
| 2023-02-02T17:03:25.409530 | 2020-12-21T11:26:03 | 2020-12-21T11:26:03 | 322,233,563 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/python3
#-*- coding: utf-8 -*-
import os
#returns the environment variables of the host the trojan is running on
def run(**kwargs):
print('This is environment module')
return str(os.environ)
|
UTF-8
|
Python
| false | false | 181 |
py
| 3 |
environment.py
| 2 | 0.658065 | 0.645161 | 0 | 9 | 16.222222 | 39 |
luispedro/mahotas
| 13,357,348,321,177 |
0d6a3fd3e72850d6ed26fbd7cf542f62f91dc23d
|
829a7b38243821a1d3f274952fd7c4e5140c9f40
|
/mahotas/edge.py
|
18fbbee69a075d1035e39eb75e0ff13b24d01f19
|
[
"MIT",
"BSL-1.0"
] |
permissive
|
https://github.com/luispedro/mahotas
|
e6f9c1e21acaded80946ebf8b1c0366cbcd91d2f
|
f7edeb22a4b47e6c5200c008a8e7386067d72443
|
refs/heads/master
| 2023-08-23T22:38:05.018281 | 2023-06-09T12:57:00 | 2023-06-09T22:11:37 | 495,613 | 648 | 175 |
NOASSERTION
| false | 2023-05-26T18:46:28 | 2010-01-31T00:13:06 | 2023-05-15T13:09:07 | 2023-04-29T05:36:35 | 3,965 | 800 | 154 | 24 |
Python
| false | false |
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2014, Luis Pedro Coelho <[email protected]>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# License: MIT (see COPYING file)
from __future__ import division
import numpy as np
import mahotas as mh
from . import convolve, gaussian_filter
_hsobel_filter = np.array([
[-1, 0, 1],
[-2, 0, 2],
[-1, 0, 1]])/8.
_vsobel_filter = np.array([
[-1, -2, -1],
[ 0, 0, 0],
[ 1, 2, 1]])/8.
__all__ = [
'sobel',
'dog',
]
def sobel(img, just_filter=False):
'''
edges = sobel(img, just_filter=False)
Compute edges using Sobel's algorithm
`edges` is a binary image of edges computed according to Sobel's algorithm.
This implementation is tuned to match MATLAB's implementation.
Parameters
----------
img : Any 2D-ndarray
just_filter : boolean, optional
If true, then return the result of filtering the image with the sobel
filters, but do not threashold (default is False).
Returns
-------
edges : ndarray
Binary image of edges, unless `just_filter`, in which case it will be
an array of floating point values.
'''
# This is based on Octave's implementation,
# but with some reverse engineering to match Matlab exactly
img = np.array(img, dtype=float)
if img.ndim != 2:
raise ValueError('mahotas.sobel: Only available for 2-dimensional images')
img -= img.min()
ptp = img.ptp()
if ptp == 0:
return img
img /= ptp
# Using 'nearest' seems to be MATLAB's implementation
vfiltered = convolve(img, _vsobel_filter, mode='nearest')
hfiltered = convolve(img, _hsobel_filter, mode='nearest')
vfiltered **= 2
hfiltered **= 2
filtered = vfiltered
filtered += hfiltered
if just_filter:
return filtered
thresh = 2*np.sqrt(filtered.mean())
return mh.regmax(filtered) * (np.sqrt(filtered) > thresh)
def dog(img, sigma1 = 2, multiplier = 1.001, just_filter = False):
'''
edges = dog(img, sigma1 = 2, thresh= None, just_filter = False)
Compute edges using the Difference of Gaussian (DoG) operator.
`edges` is a binary image of edges.
Parameters
----------
img : Any 2D-ndarray
sigma1 : the sigma value of the first Gaussian filter. The second filter
will have sigma value 1.001*sigma1
multiplier : the multiplier to get sigma2. sigma2 = sigma1 * multiplier
just_filter : boolean, optional
If true, then return the result of filtering the image with the DoG
filters, no zero-crossing is detected (default is False).
Returns
-------
edges : ndarray
Binary image of edges, unless `just_filter`, in which case it will be
an array of floating point values.
'''
img = np.array(img, dtype=float)
if img.ndim != 2:
raise ValueError('mahotas.dog: Only available for 2-dimensional images')
sigma2 = sigma1 * multiplier
G1 = gaussian_filter(img, sigma1, mode = 'nearest')
G2 = gaussian_filter(img, sigma2, mode = 'nearest')
DoG = G2 - G1
(m, n) = img.shape
if not just_filter:
e = np.zeros((m, n), dtype=bool)
else:
return DoG
thresh = .75 * np.mean(abs(DoG))
# Look for the zero crossings: +-, -+ and their transposes
# Choose the edge to be the negative point
rr = np.arange(1, m-2)
cc = np.arange(1, n-2)
(rx,cx) = np.nonzero(
np.logical_and(np.logical_and(DoG[np.ix_(rr,cc)] < 0, DoG[np.ix_(rr,cc+1)] > 0),
abs( DoG[np.ix_(rr,cc)] - DoG[np.ix_(rr,cc+1)]) > thresh) ) # [- +]
e[(rx,cx)] = 1
(rx,cx) = np.nonzero(
np.logical_and(np.logical_and(DoG[np.ix_(rr,cc-1)] > 0, DoG[np.ix_(rr,cc+1)] < 0),
abs( DoG[np.ix_(rr,cc-1)] - DoG[np.ix_(rr,cc)]) > thresh) ) # [+ -]
e[(rx,cx)] = 1
(rx,cx) = np.nonzero(
np.logical_and(np.logical_and(DoG[np.ix_(rr,cc)] < 0, DoG[np.ix_(rr+1,cc)] > 0),
abs( DoG[np.ix_(rr,cc)] - DoG[np.ix_(rr+1,cc)]) > thresh) ) # [- +]'
e[(rx,cx)] = 1
(rx,cx) = np.nonzero(
np.logical_and(np.logical_and(DoG[np.ix_(rr-1,cc)] > 0, DoG[np.ix_(rr,cc)] < 0),
abs( DoG[np.ix_(rr-1,cc)] - DoG[np.ix_(rr,cc)]) > thresh) ) # [+ -]'
e[(rx,cx)] = 1
# Another case: DoG can be precisely zero
(rz,cz) = np.nonzero(DoG[np.ix_(rr,cc)] == 0)
if rz.size != 0:
# Look for the zero crossings: +0-, -0+ and their transposes
# The edge lies on the Zero point
(rx,cx) = np.nonzero(
np.logical_and(np.logical_and(DoG[np.ix_(rz,cz-1)] < 0, DoG[np.ix_(rz,cz+1)] > 0),
abs( DoG[np.ix_(rz,cz+1)] - DoG[np.ix_(rz,cz-1)]) > thresh) ) # [- 0 +]
e[(rx,cx)] = 1
(rx,cx) = np.nonzero(
np.logical_and(np.logical_and(DoG[np.ix_(rz,cz-1)] > 0, DoG[np.ix_(rz,cz+1)] < 0),
abs( DoG[np.ix_(rz,cz-1)] - DoG[np.ix_(rz,cz+1)]) > thresh) ) # [+ 0 -]
e[(rx,cx)] = 1
(rx,cx) = np.nonzero(
np.logical_and(np.logical_and(DoG[np.ix_(rz-1,cz)] < 0, DoG[np.ix_(rz+1,cz)] > 0),
abs( DoG[np.ix_(rz+1,cz)] - DoG[np.ix_(rz-1,cz)]) > thresh) ) # [- 0 +]'
e[(rx,cx)] = 1
(rx,cx) = np.nonzero(
np.logical_and(np.logical_and(DoG[np.ix_(rz-1,cz)] > 0, DoG[np.ix_(rz+1,cz)] < 0),
abs( DoG[np.ix_(rz-1,cz)] - DoG[np.ix_(rz+1,cz)]) > thresh) ) # [+ 0 -]'
e[(rx,cx)] = 1
return e
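# --- Hedged usage example (appended; not part of the original module) ---
# Minimal demonstration of the sobel() function defined above on a synthetic
# image containing a single vertical step edge; numpy is already imported at
# the top of this module.
if __name__ == '__main__':
    demo = np.zeros((64, 64))
    demo[:, 32:] = 1.0                          # vertical step edge
    edge_map = sobel(demo)                      # thresholded binary edge map
    response = sobel(demo, just_filter=True)    # raw filter response
    print(edge_map.sum(), response.max())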
|
UTF-8
|
Python
| false | false | 5,678 |
py
| 160 |
edge.py
| 121 | 0.544382 | 0.521486 | 0 | 160 | 34.3 | 101 |
s-kyum/Python
| 15,058,155,376,638 |
f00d834a9033871d6e2e123e845479c0de705c18
|
27fc04a95b0d268adef4d4497c27ea9ae295d8a4
|
/ch02/calcu.py
|
47c677978bd91b50f959944e2d24735f47abff80
|
[] |
no_license
|
https://github.com/s-kyum/Python
|
2b35b333557db0698a3fd305d550baaa5304f206
|
e5b31036acd2bfb79f98ff02d59096a2429eb41f
|
refs/heads/master
| 2023-07-09T18:45:26.179057 | 2021-08-23T03:07:57 | 2021-08-23T03:07:57 | 378,803,615 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# Arithmetic operators
print("7+4=",7+4)
print("7-4=",7-4)
print("7*4=",7*4)
print("7/4=",7/4)
print("7%4=",7%4)
print("7//4=",7//4) #몫
print("7**4=",7**4) #제곱
|
UTF-8
|
Python
| false | false | 162 |
py
| 101 |
calcu.py
| 95 | 0.486301 | 0.294521 | 0 | 9 | 15.222222 | 23 |
Silentsoul04/FTSP_2020
| 9,405,978,385,334 |
31a4b5eaf14f1b5f16068cd1c527d2229da270b8
|
c3d0a0b6336a3ff73724fe1615eb1809dbdaaed8
|
/Forsk Pandas/baltimore.py
|
9cc6cda3afb6fa44620494517d06203ed8625c01
|
[] |
no_license
|
https://github.com/Silentsoul04/FTSP_2020
|
db0dae6cd9c371f3daa9219f86520dfa66348236
|
7e603af918da2bcfe4949a4cf5a33107c837894f
|
refs/heads/master
| 2022-12-21T20:44:32.031640 | 2020-09-20T12:29:58 | 2020-09-20T12:29:58 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 2 18:01:43 2020
@author: Rajesh
"""
"""
Code Challenge
Name:
Baltimore City Analysis
Filename:
baltimore.py
Problem Statement:
Read the Baltimore_City_Employee_Salaries_FY2014.csv file
and perform the following task :
0. remove the dollar signs in the AnnualSalary field and assign it as a int.
0. Group the data on JobTitle and AnnualSalary, and aggregate with sum, mean, etc.
Sort the data and display to show who get the highest salary
0. Try to group on JobTitle only and sort the data and display
0. How many employess are there for each JobRoles and Graph it
0. Graph and show which Job Title spends the most
0. List All the Agency ID and Agency Name
0. Find all the missing Gross data in the dataset
"""
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv('E:\Forsk Pandas\CSV files\Baltimore_City_Employee_Salaries_FY2014.csv')
print(df)
df.columns
# remove the dollar signs in the AnnualSalary field and assign it as a int.
df['AnnualSalary'] #Annual salary is in float and we need to convert into int64 format.
"""
0 11310.0
1 53428.0
2 68300.0
3 62000.0
4 43999.0
18976 11310.0
18977 11310.0
18978 43999.0
18979 44104.0
18980 53568.0
Name: AnnualSalary, Length: 18981, dtype: float64
"""
df['AnnualSalary'].astype('int64')
"""
0 11310
1 53428
2 68300
3 62000
4 43999
18976 11310
18977 11310
18978 43999
18979 44104
18980 53568
Name: AnnualSalary, Length: 18981, dtype: int64
"""
# Group the data on JobTitle and AnnualSalary, and aggregate with sum, mean, etc.
#Sort the data and display to show who get the highest salary
df[['JobTitle','AnnualSalary']]
df['AnnualSalary'].agg(['sum','mean','std'])
"""
sum     7.546237e+08
mean    3.975679e+04
std     2.517305e+04
Name: AnnualSalary, dtype: float64
"""
# Try to group on JobTitle only and sort the data and display.
df['JobTitle']
df['JobTitle'].unique()
df['JobTitle'].count() # 18981
sort=df['JobTitle']
sorted(sort)
# How many employess are there for each JobRoles and Graph it.
df.columns
df['JobTitle'].value_counts()
df['JobTitle'].unique()
plt.pie(df['JobTitle'].value_counts(),labels=df['JobTitle'].unique(),radius =3 , autopct='%1.2f%%')
plt.savefig('E:\Forsk Pandas\Baltmore.jpg')
plt.show()
df.columns
# Graph and show which Job Title spends the most.
df['HireDate'].value_counts()
df['HireDate'].unique()
plt.pie(df['HireDate'].value_counts(dropna=False),labels=df['HireDate'].unique(),radius =3 , autopct='%1.2f%%')
plt.savefig('E:\Forsk Pandas\Baltmore1.jpg')
plt.show()
# List All the Agency ID and Agency Name.
df[['AgencyID','Agency']]
"""
      AgencyID                   Agency
0       W02200             Youth Summer
1       A03031       OED-Employment Dev
2       A29005  States Attorneys Office
3       A65026   HLTH-Health Department
4       A99416        Police Department
           ...                      ...
18976   W02235             Youth Summer
18977   W02629             Youth Summer
18978   A99416        Police Department
18979   A99262        Police Department
18980   A50206  DPW-Water & Waste Water
[18981 rows x 2 columns]
"""
# Find all the missing Gross data in the dataset .
df[df[df.columns].isnull()]
"""
      Name JobTitle AgencyID Agency HireDate  AnnualSalary  GrossPay
0      NaN      NaN      NaN    NaN      NaN           NaN       NaN
1      NaN      NaN      NaN    NaN      NaN           NaN       NaN
2      NaN      NaN      NaN    NaN      NaN           NaN       NaN
3      NaN      NaN      NaN    NaN      NaN           NaN       NaN
4      NaN      NaN      NaN    NaN      NaN           NaN       NaN
       ...      ...      ...    ...      ...           ...       ...
18976  NaN      NaN      NaN    NaN      NaN           NaN       NaN
18977  NaN      NaN      NaN    NaN      NaN           NaN       NaN
18978  NaN      NaN      NaN    NaN      NaN           NaN       NaN
18979  NaN      NaN      NaN    NaN      NaN           NaN       NaN
18980  NaN      NaN      NaN    NaN      NaN           NaN       NaN
[18981 rows x 7 columns]
"""
# ---------------------------------------------------------------------------------------
plt.figure(figsize= (50,10))
x = df['JobTitle'].unique()
y = df['JobTitle'].value_counts()
plt.bar(x,y)
# simple line plot
plt.plot(x,y)
plt.savefig('E:\Forsk Pandas\Baltmore_Bar.jpg')
plt.show()
|
UTF-8
|
Python
| false | false | 4,541 |
py
| 553 |
baltimore.py
| 450 | 0.590399 | 0.506276 | 0 | 162 | 26.876543 | 111 |
ronrest/deepQ_bananas
| 17,566,416,275,950 |
9cc61b6dbf345cd5b710ece8fee67627a7e5c0d8
|
5fff6e387f47b564c8b3d214af459c72553e6eb2
|
/model.py
|
41b66330a2a2fb450c7cd2649701a397fbc5e72b
|
[] |
no_license
|
https://github.com/ronrest/deepQ_bananas
|
63a4ba9007d9f0a9f65587c3a8caceab0ea4eeca
|
0ec3965140bcf399a209d63c9137ce65d5fb55a2
|
refs/heads/master
| 2020-03-27T11:41:45.220679 | 2018-08-28T20:41:29 | 2018-08-28T20:41:29 | 146,502,670 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
class QNetwork(nn.Module):
""" A neural network that represents the Q-action values for a
reinforcement learning agent.
Two fully connected hidden layers, with ReLu activations, followed by
a fully connected output layer.
"""
def __init__(self, state_size, action_size, seed=333, n_hidden=32):
""" Initialize the weights and build the components of the model.
Args:
state_size (int): Shape of the state (as the input layer size)
action_size (int): Number of actions (as the output layer size)
seed (int): Random seed, for reproducibility
n_hidden (int): Number of nodes in the hidden layers.
"""
super(QNetwork, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(in_features=state_size, out_features=n_hidden, bias=True)
self.fc2 = nn.Linear(in_features=n_hidden, out_features=n_hidden, bias=True)
self.fc_out = nn.Linear(in_features=n_hidden, out_features=action_size, bias=False)
def forward(self, state):
""" Builds the forward pass structure of the network, which maps from
state to action values
"""
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
x = self.fc_out(x)
return x
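# --- Hedged usage example (appended; not part of the original module) ---
# Instantiating the network and running a forward pass on a random batch. The
# state/action sizes below are placeholders, not values taken from the project.
if __name__ == '__main__':
    net = QNetwork(state_size=37, action_size=4, seed=0, n_hidden=32)
    states = torch.rand(8, 37)    # batch of 8 example state vectors
    q_values = net(states)        # shape: (8, 4)
    print(q_values.shape)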
|
UTF-8
|
Python
| false | false | 1,402 |
py
| 8 |
model.py
| 5 | 0.624108 | 0.617689 | 0 | 34 | 40.235294 | 91 |
AllenInstitute/bmtk
| 7,980,049,266,364 |
f13e67d855a181fd9f3a60adfe3b1b1a06af8c99
|
d813a392c7cbc8dbbec273b3a2366a50f9df45c9
|
/bmtk/utils/sonata/group.py
|
589d5a6a8ba264783c0e20a106a65529e5436f62
|
[
"BSD-3-Clause"
] |
permissive
|
https://github.com/AllenInstitute/bmtk
|
1496d6e0bd7cbef7b1b8cac64a8589d01548f897
|
ae9c24c415a3fbd60397b4ead160b72b4b3e4e4f
|
refs/heads/develop
| 2023-08-24T20:09:32.763686 | 2023-08-20T18:29:19 | 2023-08-20T18:29:19 | 104,507,294 | 253 | 111 |
BSD-3-Clause
| false | 2023-08-24T14:38:57 | 2017-09-22T18:42:44 | 2023-08-14T14:51:54 | 2023-08-24T13:29:55 | 201,704 | 239 | 79 | 37 |
Python
| false | false |
# Copyright 2017. Allen Institute. All rights reserved
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import numpy as np
import pandas as pd
from .column_property import ColumnProperty
from .node import Node, NodeSet
from .edge import Edge, EdgeSet
class Group(object):
"""A container containig a node/edge population groups.
A node or edge population will have one or more groups, each having a unique identifier. Each group shared the same
columns and datatypes, thus each group is essentially a different model.
"""
def __init__(self, group_id, h5_group, parent):
self._group_id = int(group_id)
self._parent = parent
self._types_table = parent.types_table
self._h5_group = h5_group
self._types_index_col = self._types_table.index_column_name
self._group_columns = ColumnProperty.from_h5(h5_group)
# TODO: combine group_columns, group_column_names and group_columns_map, doesn't need to be 3 structures
self._group_column_map = {col.name: col for col in self._group_columns}
self._group_column_names = set(col.name for col in self._group_columns)
self._group_table = {prop: h5_group[prop.name] for prop in self._group_columns}
self._ncolumns = len(self._group_columns)
self._all_columns = self._group_columns + self._types_table.columns
self._all_column_names = set(col.name for col in self._all_columns)
self._nrows = 0 # number of group members
# For storing dynamics_params subgroup (if it exists)
self._has_dynamics_params = 'dynamics_params' in self._h5_group and len(self._h5_group['dynamics_params']) > 0
self._dynamics_params_columns = []
# An index of all the rows in parent population that map onto a member of this group
self._parent_indicies = None # A list of parent rows indicies
self._parent_indicies_built = False
self.check_format()
@property
def group_id(self):
return self._group_id
@property
def has_dynamics_params(self):
return False
@property
def columns(self):
return self._group_columns
@property
def group_columns(self):
return self._group_columns
@property
def all_columns(self):
return self._all_columns
@property
def has_gids(self):
return self._parent.has_gids
@property
def parent(self):
return self._parent
def get_dataset(self, column_name):
return self._group_table[column_name]
def column(self, column_name, group_only=False):
if column_name in self._group_column_map:
return self._group_column_map[column_name]
elif not group_only and column_name in self._types_table.columns:
return self._types_table.column(column_name)
else:
return KeyError
def check_format(self):
# Check that all the properties have the same number of rows
col_counts = [col.nrows for col in self._group_columns + self._dynamics_params_columns]
if len(set(col_counts)) > 1:
# TODO: Would be nice to warn user which dataset have different size
raise Exception('properties in {}/{} have different ranks'.format(self._parent.name, self._group_id))
elif len(set(col_counts)) == 1:
self._nrows = col_counts[0]
def build_indicies(self, force=False):
raise NotImplementedError
def to_dataframe(self):
raise NotImplementedError
def get_values(self, property_name, all_rows=False):
"""Returns all values for a group property.
Note that a row within a group may not have a corresponding node/edge, or they may have a different order or
multiple node/edges may share the same group row. Setting all_rows=False will return all the values as you
see if you iterated through all the population's items. Setting all_rows=True just returns the data as a
list as they appear in the dataset (will be faster).
:param property_name: Name of dataset property/column to fetch.
        :param all_rows: Set false to return values in the order they appear in the population, true to return the entire dataset
:return: A list of values for the given column name.
"""
raise NotImplementedError
def __len__(self):
return self._nrows
def __getitem__(self, group_index):
group_props = {}
for cname, h5_obj in self._group_table.items():
group_props[cname] = h5_obj[group_index]
return group_props
def __contains__(self, prop_name):
"""Search that a column name exists in this group"""
return prop_name in self._group_column_names
class NodeGroup(Group):
def __init__(self, group_id, h5_group, parent):
super(NodeGroup, self).__init__(group_id, h5_group, parent)
# Note: Don't call build_indicies right away so uses can call __getitem__ without having to load all the
# node_ids
@property
def node_ids(self):
self.build_indicies()
# print self._parent_indicies
return self._parent.inode_ids(self._parent_indicies)
@property
def node_type_ids(self):
self.build_indicies()
return self._parent.inode_type_ids(self._parent_indicies)
@property
def gids(self):
self.build_indicies()
return self._parent.igids(self._parent_indicies)
def build_indicies(self, force=False):
if self._parent_indicies_built and not force:
return
# TODO: Check for the special case where there is only one group
# TODO: If memory becomes an issue on very larget nodes (10's of millions) consider using a generator
# I've pushed the actual building of the population->group indicies onto the parent population
self._parent_indicies = self._parent.group_indicies(self.group_id, build_cache=True)
self._parent_indicies_built = True
def get_values(self, property_name, filtered_indicies=True):
self.build_indicies()
# TODO: Check if property_name is node_id, node_type, or gid
if property_name in self._group_columns:
if not filtered_indicies:
# Just return all values in dataset
return np.array(self._group_table[property_name])
else:
# Return only those values for group indicies with associated nodes
grp_indicies = self._parent.igroup_indicies(self._parent_indicies)
# It is possible that the group_index is unordered or contains duplicates which will cause h5py slicing
# to fail. Thus convert to a numpy array
# TODO: loading the entire table is not good if the filtered nodes is small, consider building.
tmp_array = np.array(self._group_table[property_name])
return tmp_array[grp_indicies]
elif property_name in self._parent.node_types_table.columns:
# For properties that come from node-types table we need to build the results from scratch
# TODO: Need to performance test, I think this code could be optimized.
node_types_table = self._parent.node_types_table
nt_col = node_types_table.column(property_name)
tmp_array = np.empty(shape=len(self._parent_indicies), dtype=nt_col.dtype)
for i, ntid in enumerate(self.node_type_ids):
tmp_array[i] = node_types_table[ntid][property_name]
return tmp_array
def to_dataframe(self):
self.build_indicies()
# Build a dataframe of group properties
# TODO: Include dynamics_params?
properties_df = pd.DataFrame()
for col in self._group_columns:
if col.dimension > 1:
for i in range(col.dimension):
# TODO: see if column name exists in the attributes
col_name = '{}.{}'.format(col.name, i)
properties_df[col_name] = pd.Series(self._h5_group[col.name][:, i])
else:
properties_df[col.name] = pd.Series(self._h5_group[col.name])
# Build a dataframe of parent node (node_id, gid, node_types, etc)
root_df = pd.DataFrame()
root_df['node_type_id'] = pd.Series(self.node_type_ids)
root_df['node_id'] = pd.Series(self.node_ids)
root_df['node_group_index'] = pd.Series(self._parent.igroup_indicies(self._parent_indicies)) # used as pivot
if self._parent.has_gids:
root_df['gid'] = self.gids
# merge group props df with parent df
results_df = root_df.merge(properties_df, how='left', left_on='node_group_index', right_index=True)
results_df = results_df.drop('node_group_index', axis=1)
# Build node_types dataframe and merge
node_types_df = self._parent.node_types_table.to_dataframe()
# remove properties that exist in the group
node_types_cols = [c.name for c in self._parent.node_types_table.columns if c not in self._group_columns]
node_types_df = node_types_df[node_types_cols]
# TODO: consider caching these results
return results_df.merge(node_types_df, how='left', left_on='node_type_id', right_index=True)
def filter(self, **filter_props):
"""Filter all nodes in the group by key=value pairs.
The filter specifications may apply to either node_type or group column properties. Currently it only supports
equality checks. An intersection (and operator) is applied across the different filter pairs. This will
produce a generator of all nodes matching the filters.
for node in filter(pop_name='VIp', depth=10.0):
assert(node['pop_name'] == 'VIp' and node['depth'] == 10.0)
:param filter_props: keys and their values to filter nodes on.
:return: A generator that produces all valid nodes within the group with matching key==value pairs.
"""
# TODO: Integrate this with NodeSet.
self.build_indicies()
node_types_table = self._parent.node_types_table
node_type_filter = set(node_types_table.node_type_ids) # list of valid node_type_ids
type_filter = False
group_prop_filter = {} # list of 'prop_name'==prov_val for group datasets
group_filter = False
node_id_filter = []
# Build key==value lists
for filter_key, filter_val in filter_props.items():
# TODO: Check if node_type_id is an input
if filter_key in self._group_columns:
# keep a list of group properties to filter
group_prop_filter[filter_key] = filter_val
group_filter = True
elif filter_key in node_types_table.columns:
# for node_types we just keep a list of all node_type_ids with matching key==value pairs
node_type_filter &= set(node_types_table.find(filter_key, filter_val))
type_filter = True
elif filter_key in ['node_id', 'node_ids']:
node_id_filter += filter_val
else:
# TODO: should we raise an exception?
# TODO: Use a logger
print('Could not find property {} in either group or types table. Ignoring.'.format(filter_key))
# iterate through all nodes, skipping ones that don't have matching key==value pairs
for indx in self._parent_indicies:
# TODO: Don't build the node until you filter out node_type_id
node = self._parent.get_row(indx)
if type_filter and node.node_type_id not in node_type_filter:
# confirm node_type_id is a correct one
continue
if node_id_filter and node.node_id not in node_id_filter:
continue
if group_filter:
# Filter by group property values
# TODO: Allow group properties to handle lists
src_failed = True
for k, v in group_prop_filter.items():
if node[k] != v:
break
else:
src_failed = False
if src_failed:
continue
yield node
def __iter__(self):
self.build_indicies()
# Pass a list of indicies into the NodeSet, the NodeSet will take care of the iteration
return NodeSet(self._parent_indicies, self._parent).__iter__()
class EdgeGroup(Group):
def __init__(self, group_id, h5_group, parent):
super(EdgeGroup, self).__init__(group_id, h5_group, parent)
self._indicies_count = 0  # Used to keep track of number of indicies (since it contains multiple ranges)
self.__itr_index = 0
self.__itr_range = []
self.__itr_range_idx = 0
self.__itr_range_max = 0
def build_indicies(self, force=False):
if self._parent_indicies_built and not force:
return
# Saves indicies as a (potentially empty) list of ranges
# TODO: Turn index into generator, allows for cheaper iteration over the group
self._indicies_count, self._parent_indicies = self._parent.group_indicies(self.group_id, build_cache=False)
self._parent_indicies_built = True
def to_dataframe(self):
self.build_indicies()
# Build a dataframe of group properties
# TODO: Include dynamics_params?
properties_df = pd.DataFrame()
for col in self._group_columns:
if col.dimension > 1:
for i in range(col.dimension):
# TODO: see if column name exists in the attributes
col_name = '{}.{}'.format(col.name, i)
properties_df[col_name] = pd.Series(self._h5_group[col.name][:, i])
else:
properties_df[col.name] = pd.Series(self._h5_group[col.name])
# Build a dataframe of parent node
root_df = pd.DataFrame()
root_df['edge_type_id'] = pd.Series(self.edge_type_ids)
root_df['source_node_id'] = pd.Series(self.src_node_ids)
root_df['target_node_id'] = pd.Series(self.trg_node_ids)
root_df['edge_group_index'] = pd.Series(self._parent.group_indicies(self.group_id, as_list=True)) # pivot col
# merge group props df with parent df
results_df = root_df.merge(properties_df, how='left', left_on='edge_group_index', right_index=True)
results_df = results_df.drop('edge_group_index', axis=1)
# Build node_types dataframe and merge
edge_types_df = self._parent.edge_types_table.to_dataframe()
# remove properties that exist in the group
edge_types_cols = [c.name for c in self._parent.edge_types_table.columns if c not in self._group_columns]
edge_types_df = edge_types_df[edge_types_cols]
# TODO: consider caching these results
return results_df.merge(edge_types_df, how='left', left_on='edge_type_id', right_index=True)
def _get_parent_ds(self, parent_ds):
self.build_indicies()
ds_vals = np.zeros(self._indicies_count, dtype=parent_ds.dtype)
c_indx = 0
for indx_range in self._parent_indicies:
indx_beg, indx_end = indx_range[0], indx_range[1]
n_indx = c_indx + (indx_end - indx_beg)
ds_vals[c_indx:n_indx] = parent_ds[indx_beg:indx_end]
c_indx = n_indx
return ds_vals
@property
def src_node_ids(self):
return self._get_parent_ds(self.parent._source_node_id_ds)
@property
def trg_node_ids(self):
return self._get_parent_ds(self.parent._target_node_id_ds)
@property
def edge_type_ids(self):
return self._get_parent_ds(self.parent._type_id_ds)
def get_values(self, property_name, all_rows=False):
# TODO: Need to take into account if property_name is in the edge-types
if property_name not in self.columns:
raise KeyError
if all_rows:
return np.array(self._h5_group[property_name])
else:
self.build_indicies()
# Go through all ranges and build the return list
dataset = self._h5_group[property_name]
return_list = np.empty(self._indicies_count, self._h5_group[property_name].dtype)
i = 0
for r_beg, r_end in self._parent_indicies:
r_len = r_end - r_beg
return_list[i:(i+r_len)] = dataset[r_beg:r_end]
i += r_len
return return_list
def filter(self, **filter_props):
# TODO: I'm not sure If I want to do this? Need to check on a larger dataset than I currently have.
raise NotImplementedError
def __iter__(self):
self.build_indicies()
# TODO: Implement using an EdgeSet
if len(self._parent_indicies) == 0:
# no ranges to iterate over; ensure __next__ raises StopIteration immediately
self.__itr_range_max = 0
self.__itr_range_idx = 0
self.__itr_range = []
self.__itr_index = 0
else:
# Stop at the largest range end (I'm not sure if the indicies are ordered; if we can make them ordered then
# in the future just use self._parent_indicies[-1][1])
self.__itr_range_max = len(self._parent_indicies)
self.__itr_range_idx = 0
self.__itr_range = self._parent_indicies[0]
self.__itr_index = self.__itr_range[0]
return self
def next(self):
return self.__next__()
def __next__(self):
if self.__itr_range_idx >= self.__itr_range_max:
raise StopIteration
nxt_edge = self._parent.get_row(self.__itr_index)
self.__itr_index += 1
if self.__itr_index >= self.__itr_range[1]:
# iterator has moved past the current range
self.__itr_range_idx += 1
if self.__itr_range_idx < self.__itr_range_max:
# move the iterator onto next range
self.__itr_range = self._parent_indicies[self.__itr_range_idx] # update range
self.__itr_index = self.__itr_range[0] # update iterator to start and the beginning of new range
else:
self.__itr_range = []
return nxt_edge
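
# --- Illustrative sketch (not part of the original file) ---------------------
# A minimal, hedged example of the two retrieval modes described in the
# Group.get_values() docstring above: all_rows=True returns a column exactly as
# stored in the dataset, while all_rows=False gathers only the rows referenced
# by the parent population, in population order. The column values and index
# array below are made-up stand-ins for the real HDF5-backed tables.
if __name__ == '__main__':
    import numpy as np

    # pretend group column with six rows (e.g. a 'depth' property)
    depth_column = np.array([10.0, 12.5, 7.0, 3.3, 9.9, 5.5])

    # pretend population -> group-row mapping; it may be unordered and may
    # repeat rows (several nodes sharing one group row), which is why the
    # code above converts the column to a numpy array before fancy indexing
    group_indices = np.array([4, 0, 0, 2])

    print(depth_column)                 # all_rows=True: data as stored
    print(depth_column[group_indices])  # all_rows=False: population order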
|
UTF-8
|
Python
| false | false | 19,742 |
py
| 625 |
group.py
| 329 | 0.625975 | 0.622581 | 0 | 457 | 42.199125 | 120 |
itdream-dev/python
| 13,374,528,198,069 |
b703be32bf63e0b44cc06559ad5e182fae9754cb
|
079c07c5d97eb60d36269e27309e84b25ea0aaeb
|
/guidehero-backend/lib/models/answer.py
|
8a8bf5de12fd00b9b80792ab04d4528620647756
|
[] |
no_license
|
https://github.com/itdream-dev/python
|
3aa44329673f05e2a86e1cba56cb88101c777233
|
eda81b802b99f45933bdf0d22b508837cfa538f0
|
refs/heads/master
| 2023-03-05T12:27:42.776870 | 2020-05-11T15:54:45 | 2020-05-11T15:54:45 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
from tag import Tag # NOQA
from lib.registry import get_registry
db = get_registry()['DB']
relationship_table = db.Table(
'answer_association',
db.Column('tag_id', db.Integer, db.ForeignKey('tag.id')),
db.Column('answer_id', db.Integer, db.ForeignKey('answer.id'))
)
class Answer(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(255), nullable=False)
tags = db.relationship(
'Tag', secondary=relationship_table, backref='answer'
)
|
UTF-8
|
Python
| false | false | 532 |
py
| 221 |
answer.py
| 194 | 0.657895 | 0.650376 | 0 | 20 | 25.6 | 66 |
MBMunzi/Math_Algorithm
| 4,750,233,871,073 |
6b923be428214cc757a10072a284b8662aa4c3a1
|
5668fc337f2365f3b5cf1efb98ba7d975ed5da20
|
/Test.Polynomial.py
|
279ae69a70405b794248fd7aa03a3ed26cdf101e
|
[] |
no_license
|
https://github.com/MBMunzi/Math_Algorithm
|
c65a2d0698820301c8b3d6e06cf982227762a2f6
|
8f46c3c91535504fded01c13ce5f2d059ef8e878
|
refs/heads/master
| 2020-09-25T09:38:35.439611 | 2020-03-17T20:02:36 | 2020-03-17T20:02:36 | 225,976,873 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import unittest
from .Polynomio import Polynomial
class TestPolynomial(unittest.TestCase):
def test_evaluate_roots_about_polynomial(self):
roots = [-3.0, -2.0, 1.0]
self.assertEqual(roots, Polynomial([1, 4, 1, -6]))
def test_Evaluate_Polynomial(self):
self.assertEqual(0, Polynomial.evaluate_polynomial(1))
if __name__ == "__main__":
unittest.main()
|
UTF-8
|
Python
| false | false | 394 |
py
| 2 |
Test.Polynomial.py
| 2 | 0.654822 | 0.624365 | 0 | 16 | 23.5 | 62 |
victor-nerd/finansez-python
| 17,875,653,900,137 |
bb962265c165fec97c34d6ab7c983d0a8f47a1d4
|
b6b4a0f011f9fadcde3a800e73e0afbf2377ad79
|
/finansez/finansez/api/serializers/category_view.py
|
6ebf682e49a9649a0e10f3fa10ecb2c1d810b919
|
[] |
no_license
|
https://github.com/victor-nerd/finansez-python
|
c65a3a3369665176c8bd56ad993c9f0a9af78b41
|
48f83fc34d5ef534551f31c70c79e96e50ae2a74
|
refs/heads/master
| 2019-04-07T11:32:55.586359 | 2017-04-01T08:15:17 | 2017-04-01T08:15:17 | 86,661,342 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from rest_framework.fields import empty
from finansez.api.responses.category_info import CategoryInfoResponse
from finansez.api.serializers.base import BaseApiSerializer
from finansez.core.models import Category
class CategoryViewSerializer(BaseApiSerializer):
id = serializers.UUIDField(required=True)
def __init__(self, instance=None, data=empty, **kwargs):
self.category = None
super().__init__(instance, data, **kwargs)
def validate(self, data):
try:
self.category = Category.objects.get(id=data['id'], user=self.user, deleted=False)
except Category.DoesNotExist:
raise ValidationError({'id': 'Категория не существует, удалена или принадлежит другому пользователю.'})
return data
def process_internal(self):
return CategoryInfoResponse(self.category).get_response()
|
UTF-8
|
Python
| false | false | 1,020 |
py
| 90 |
category_view.py
| 77 | 0.729927 | 0.729927 | 0 | 26 | 35.846154 | 115 |
dr-dos-ok/Code_Jam_Webscraper
| 7,997,229,131,612 |
9b6826e35989d84b628504b9d0e073131df53c2a
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_43/14.py
|
96aa39bf1ca67c879035913bde0bf7096e818b9f
|
[] |
no_license
|
https://github.com/dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
execfile('utils.py')
def process(fn):
cases = [x.strip() for x in read1(fn)[1:]]
res = [processCase(x) for x in cases]
write2(fn+".out",res)
def processCase(case):
scase = set(case)
base = len(scase)
lencase = len(case)
res = 0
if base == 1:
base = 2
digitsOrder = [1,0]+range(2,base)
digitsIndex = 0
T = {}
for i in range(lencase):
if case[i] not in T:
T[case[i]] = digitsOrder[digitsIndex]
digitsIndex += 1
res *= base
res += T[case[i]]
return str(res)
|
UTF-8
|
Python
| false | false | 597 |
py
| 60,747 |
14.py
| 60,742 | 0.500838 | 0.482412 | 0 | 28 | 19.321429 | 49 |
Bikashbhaila/mycode
| 16,844,861,745,760 |
6b4315768d7faa0acbfa75a4142351aaa32ac660
|
cc4b04c90f9546f734efb938c5a4d6694b2f4b84
|
/FizzBuzz/fizz_buzz01.py
|
0db8d99df5e07c9bfd4ea868cf41df79b0536962
|
[] |
no_license
|
https://github.com/Bikashbhaila/mycode
|
f87befdca556c06815e8815c466c5f667a7f780b
|
cd82fb965f1c26480a07921ab391415919b27013
|
refs/heads/main
| 2023-07-10T16:15:11.907952 | 2021-08-13T21:01:54 | 2021-08-13T21:01:54 | 392,015,300 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#!/usr/bin/env python3
def readFile():
with open("numfile.txt", "r") as input_file:
numlist = []
for num in input_file:
numlist.append(int(num))
return numlist
def playGame(numlist):
fizz, buzz, fizzbuzz = 0, 0, 0
for num in numlist:
if num % 3 == 0 and num % 5 == 0:
print("FizzBuzz")
fizzbuzz +=1
elif num % 3 == 0:
print("Fizz")
fizz +=1
elif num % 5 == 0:
print("Buzz")
buzz += 1
else:
print(num)
return "Fizzes: {fizz} Buzzes: {buzz} FizzBuzzes: {fizzbuzz}"
def main():
playGame(readFile())
if __name__ == "__main__":
main()
|
UTF-8
|
Python
| false | false | 714 |
py
| 25 |
fizz_buzz01.py
| 22 | 0.483193 | 0.462185 | 0 | 31 | 22.032258 | 66 |
sala0320/Algorithm
| 13,615,046,328,710 |
b4e074fb1b50cc816971e7daff5823637235a6d8
|
75730862a05fe5f53655f784f299c542d9c55c37
|
/BFS+DFS/BackJoon/1520.py
|
32926ad64d601fc318798bfd321a5ca8c09a852b
|
[] |
no_license
|
https://github.com/sala0320/Algorithm
|
630404c0f053b0e87c0a16804245fa836a24e29b
|
c4abc8070b7e9c4cbee7e691b4e80c07efce0f3d
|
refs/heads/main
| 2023-04-21T08:04:55.565437 | 2022-02-25T05:27:31 | 2022-02-25T05:27:31 | 365,438,662 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#DFS + DP
import sys
sys.setrecursionlimit(10**5)
input = sys.stdin.readline
M, N = map(int, input().split())
board = [list(map(int, input().split())) for _ in range(M)]
dir = [[1,0], [0,1], [-1,0], [0,-1]]
dp = [[-1 for _ in range(N)] for _ in range(M)]
def dfs(x, y):
# If we have reached the end point, return 1 (there is exactly one path from the end point to itself)
if x == M-1 and y == N-1:
return 1
# If this cell has never been visited
if dp[x][y] == -1:
dp[x][y] = 0
for d in dir:
nx = x + d[0]
ny = y + d[1]
if nx < 0 or nx >= M or ny < 0 or ny >= N:
continue
# If the next cell is lower than the current one
if board[nx][ny] < board[x][y]:
# dp: number of paths from the current cell to the end point = sum of the path counts from each next cell to the end point
dp[x][y] += dfs(nx, ny)
return dp[x][y]
print(dfs(0,0))
print(dp)
|
UTF-8
|
Python
| false | false | 1,014 |
py
| 136 |
1520.py
| 122 | 0.467312 | 0.438257 | 0 | 30 | 26.566667 | 77 |
tedrepo/go_bot
| 16,338,055,601,612 |
830de61f6f095c8483fe03bca2f0e3675a33e5aa
|
f4406f32e727707cc787212520dd952f73afdcc7
|
/slotfill.py
|
0987323ede680629fe1554f39bb75b9e854c2434
|
[] |
no_license
|
https://github.com/tedrepo/go_bot
|
3256b8c53e7f962bad801a9797587e10341d2a8a
|
3f02f189346c0a8011c64b2a6ce25c65baf1e574
|
refs/heads/master
| 2020-03-28T03:47:27.421768 | 2018-09-09T12:18:44 | 2018-09-09T12:18:44 | 147,671,447 | 0 | 1 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
import json
from fuzzywuzzy import process
from overrides import overrides
from math import exp
from collections import defaultdict
class DstcSlotFillingNetwork():
"""
"""
def __init__(self, threshold=0.8, **kwargs):
self.threshold = threshold
self.load()
def __call__(self, tokens_batch, tags_batch, *args, **kwargs):
slots = [{}] * len(tokens_batch)
m = [i for i, v in enumerate(tokens_batch) if v]
if m:
tags_batch = [tags_batch[i] for i in m]
tokens_batch = [tokens_batch[i] for i in m]
for i, tokens, tags in zip(m, tokens_batch, tags_batch):
slots[i] = self.predict_slots(tokens, tags)
return slots
def predict_slots(self, tokens, tags):
entities, slots = self._chunk_finder(tokens, tags)
slot_values = {}
for entity, slot in zip(entities, slots):
match, score = self.ner2slot(entity, slot)
if score >= self.threshold * 100:
slot_values[slot] = match
return slot_values
def ner2slot(self, input_entity, slot):
if isinstance(input_entity, list):
input_entity = " ".join(input_entity)
entities = []
normalized_slot_vals = []
for entity_name in self._slot_vals[slot]:
for entity in self._slot_vals[slot][entity_name]:
entities.append(entity)
normalized_slot_vals.append(entity_name)
best_match, score = process.extract(input_entity, entities, limit=2**20)[0]
return normalized_slot_vals[entities.index(best_match)], score
@staticmethod
def _chunk_finder(tokens, tags):
# For a BIO-labeled sequence of tags, extract all named entities from the tokens
# (a minimal standalone sketch of this chunking idea is appended at the end of this file)
prev_tag = ''
chunk_tokens = []
entities = []
slots = []
for token, tag in zip(tokens, tags):
curent_tag = tag.split('-')[-1].strip()
current_prefix = tag.split('-')[0]
if tag.startswith('B-'):
if len(chunk_tokens) > 0:
entities.append(' '.join(chunk_tokens))
slots.append(prev_tag)
chunk_tokens = []
chunk_tokens.append(token)
if current_prefix == 'I':
if curent_tag != prev_tag:
if len(chunk_tokens) > 0:
entities.append(' '.join(chunk_tokens))
slots.append(prev_tag)
chunk_tokens = []
else:
chunk_tokens.append(token)
if current_prefix == 'O':
if len(chunk_tokens) > 0:
entities.append(' '.join(chunk_tokens))
slots.append(prev_tag)
chunk_tokens = []
prev_tag = curent_tag
if len(chunk_tokens) > 0:
entities.append(' '.join(chunk_tokens))
slots.append(prev_tag)
return entities, slots
def save(self,*args, **kwargs):
with open(self.save_path, 'w', encoding='utf8') as f:
json.dump(self._slot_vals, f)
def load(self, *args, **kwargs):
with open(self.load_path, encoding='utf8') as f:
self._slot_vals = json.load(f)
class SlotFillingComponent():
def __init__(self, threshold=0.7, return_all=False, **kwargs):
self.threshold = threshold
self.return_all = return_all
self._slot_vals = None
self.load()
def __call__(self, batch, *args, **kwargs):
if isinstance(batch[0], str):
batch = [tokenize_reg(instance.strip()) for instance in batch]
slots = [{}] * len(batch)
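
# --- Illustrative sketch (not part of the original file) ---------------------
# A small, self-contained example of the BIO chunking idea that _chunk_finder()
# above implements: walk token/tag pairs and group the tokens of each B-/I- span
# into one entity together with its slot label. The simplified helper below and
# the sample sentence/tags are assumptions made only for illustration.
def bio_chunks(tokens, tags):
    entities, slots, chunk = [], [], []
    prev_slot = ''
    for token, tag in zip(tokens, tags):
        slot = tag.split('-')[-1]
        # close the current chunk when a new entity starts or the slot changes
        if tag.startswith('B-') or tag == 'O' or slot != prev_slot:
            if chunk:
                entities.append(' '.join(chunk))
                slots.append(prev_slot)
                chunk = []
        if tag != 'O':
            chunk.append(token)
        prev_slot = slot
    if chunk:
        entities.append(' '.join(chunk))
        slots.append(prev_slot)
    return entities, slots


if __name__ == '__main__':
    toks = ['book', 'a', 'cheap', 'italian', 'restaurant']
    tags = ['O', 'O', 'B-pricerange', 'B-food', 'O']
    print(bio_chunks(toks, tags))  # (['cheap', 'italian'], ['pricerange', 'food'])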
|
UTF-8
|
Python
| false | false | 3,650 |
py
| 20 |
slotfill.py
| 19 | 0.532877 | 0.526575 | 0 | 107 | 33.093458 | 82 |
zhanglei1949/AttackSpeakerVerificationSystem
| 11,175,504,908,020 |
3c2f3cc8b9708fd532a90c06d1019c6f43ea5728
|
56aba02e07581eb3de286fe361d3e590af5aee03
|
/utils/copy_vad_clip.py
|
4207858e078eff899d351e19d4ca4f2247af551a
|
[] |
no_license
|
https://github.com/zhanglei1949/AttackSpeakerVerificationSystem
|
51daf281a7fe9b919fd6ef85703a3ef0a2e664c9
|
e698dec73132b420b16beb3ae544fe3eaffdddb8
|
refs/heads/master
| 2020-06-13T01:02:34.452105 | 2019-08-01T06:46:42 | 2019-08-01T06:46:42 | 194,480,818 | 5 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import glob
import numpy as np
import os
import soundfile
import sys
sys.path.append('../speakerVerificationSystem/')
from pre_process import read_audio
import constants as c
def VAD_and_save(source_path, target_path, target_len = 25840):
# do vad
audio = read_audio(source_path)  # VAD done inside
if (len(audio) > target_len):
a = int( (len(audio) - target_len)/2 )
audio = audio[a : a + target_len]
elif (len(audio) < target_len):
return
soundfile.write(target_path, audio, c.SAMPLE_RATE, subtype='PCM_16')
def get_wavs(dir, pattern):
res = glob.glob(os.path.join(dir, pattern))
return res
def do_vad(source_dir, target_dir):
wavs = get_wavs(source_dir, "*/*/*.wav")
print("obtain %d wavs files" % len(wavs))
for i in range(len(wavs)):
wav = wavs[i]
basename = wav.split('/')[-1]
new_basename = basename[:-4] + '-clip.wav'
target_wav_path = target_dir + new_basename
VAD_and_save(wav, target_wav_path)
if (i%100 == 0):
print(wav, target_wav_path)
def clipped_audio(x, num_frames=c.NUM_FRAMES):
if x.shape[0] > num_frames + 20:
bias = np.random.randint(20, x.shape[0] - num_frames)
clipped_x = x[bias: num_frames + bias]
elif x.shape[0] > num_frames:
bias = np.random.randint(0, x.shape[0] - num_frames)
clipped_x = x[bias: num_frames + bias]
else:
clipped_x = x
return clipped_x
if __name__ == '__main__':
source_dir = '/home/lei/d/LibriSpeech/dev-clean/'
target_dir = '/home/lei/2019/dataset/LibriSpeech/dev-clean-clip/'
do_vad(source_dir, target_dir)
|
UTF-8
|
Python
| false | false | 1,653 |
py
| 66 |
copy_vad_clip.py
| 30 | 0.61222 | 0.595886 | 0 | 52 | 30.788462 | 72 |
nilankh/LeetCodeProblems
| 12,446,815,238,888 |
9d136cd73bb4c51c917d7f8f8cb86f7d1f51132d
|
46dacdcbaca4f1a3eeb6cc69aba7f37460673200
|
/Queue/QueueUsingTwoStacks#232.py
|
42aee906279af3445f109e99d7d4fd078a2f23b5
|
[] |
no_license
|
https://github.com/nilankh/LeetCodeProblems
|
668bcdc9736d89898fc39082f5271391d5e27395
|
3594ea53ccc9de69edfbc67e6eab6e3b20959f4d
|
refs/heads/master
| 2023-06-24T13:09:45.809535 | 2021-07-22T16:36:12 | 2021-07-22T16:36:12 | 266,919,021 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#232
class QueueUsingTwoStacks:
def __init__(self):
self.__s1=[]
self.__s2=[]
def enqueue(self,data):
#(O(n))
while(len(self.__s1)!=0):
self.__s2.append(self.__s1.pop())
self.__s1.append(data)
while(len(self.__s2)!=0):
self.__s1.append(self.__s2.pop())
return
def dequeue(self):
#(O(1))
if(len(self.__s1)==0):
return -1
return self.__s1.pop()
def front(self):
if(len(self.__s1)==0):
return -1
return self.__s1[-1]
def size(self):
return len(self.__s1)
def isEmpty(self):
return self.size()==0
q=QueueUsingTwoStacks()
q.enqueue(1)
q.enqueue(2)
q.enqueue(3)
q.enqueue(4)
while(q.isEmpty() is False):
print(q.front())
q.dequeue()
|
UTF-8
|
Python
| false | false | 841 |
py
| 134 |
QueueUsingTwoStacks#232.py
| 134 | 0.485137 | 0.449465 | 0 | 47 | 16.702128 | 45 |
mdrohmann/txtemplates
| 16,904,991,315,209 |
2c4e48653d564ab41273c9abe2a60f113c5ccf79
|
b4687bd0817c6d00d8afde2721102416462cd73f
|
/tests/server_templates/test_server_templates.py
|
fd392113ed8a233092ec762e2013eaf4c2b6b26c
|
[] |
no_license
|
https://github.com/mdrohmann/txtemplates
|
a8818b5213c38f1d27d497e3578129d07f764c06
|
b346b15d3eb465ec828a31fea0a00df3ae582942
|
refs/heads/master
| 2020-06-02T21:37:31.690441 | 2015-04-21T16:00:41 | 2015-04-21T16:00:41 | 40,129,367 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# encoding: utf-8
import os
import argparse
import sys
import re
import pytest
from txtemplates import server_templates
import txtemplates
def test_get_parser_error(capsys):
parser = server_templates.get_parser()
with pytest.raises(SystemExit):
parser.parse_args([])
_, err = capsys.readouterr()
assert re.search('error: too few arguments', err)
@pytest.mark.parametrize('argstr, expected', [
('module', {'name': 'module', 'module': 'txtemplates'}),
('module package', {'name': 'module', 'module': 'package'}),
('module package -C directory -f',
{'directory': 'directory', 'force_overwrite': True})
])
def test_get_parser(argstr, expected):
parser = server_templates.get_parser()
args = parser.parse_args(argstr.split(' '))
for (k, v) in expected.items():
assert hasattr(args, k)
assert getattr(args, k) == v
def test_get_target_module():
directory = os.path.dirname(os.path.dirname(txtemplates.__file__))
args = argparse.Namespace(module='txtemplates', directory=directory)
module = server_templates.get_target_module(args)
assert module == txtemplates
args = argparse.Namespace(module='txtemplates', directory='/tmp')
module = server_templates.get_target_module(args)
assert module == txtemplates
@pytest.fixture(scope="function")
def testpackage(tmpdir):
p = tmpdir.mkdir("testpackage").join("__init__.py")
p.write("")
args = argparse.Namespace(module='testpackage', directory=str(tmpdir))
package = server_templates.get_target_module(args)
return tmpdir, package
def test_dirs(testpackage):
tempdir, package = testpackage
basedir = str(tempdir)
parser = server_templates.get_parser()
args = parser.parse_args('module testpackage'.split(' '))
dirs = server_templates.Dirs(args, package)
assert dirs.module == os.path.join(basedir, 'testpackage', 'module')
assert dirs.twistedplugin == os.path.join(
basedir, 'testpackage', 'twisted', 'plugins')
assert dirs.testbase == os.path.join(basedir, 'tests')
assert dirs.test == os.path.join(basedir, 'tests', 'module')
def test_run(testpackage, monkeypatch, capsys):
tempdir, package = testpackage
monkeypatch.setattr(
sys, "argv",
"main.py testmodule testpackage -C {}"
.format(str(tempdir)).split(" "))
server_templates.main()
files = [str(f)[len(str(tempdir)):] for f in tempdir.visit()
if not str(f).endswith('.pyc') and not '__pycache__' in str(f)]
assert len(files) == 21
assert '/testpackage/testmodule/backend/__init__.py' in files
assert '/tests/testmodule/test_testmodule_backend.py' in files
# second run should skip all files
p = tempdir.join('testpackage').join('__init__.py')
text = "# This should not be overwritten"
p.write(text)
server_templates.main()
out, _ = capsys.readouterr()
assert re.search(text, p.read())
assert re.search('exists: Skipped', out)
# another run with overwrite flag turned on, should overwrite the existing
# files.
monkeypatch.setattr(
sys, "argv",
"main.py testmodule testpackage -C {} -f"
.format(str(tempdir)).split(" "))
server_templates.main()
p = tempdir.join('testpackage').join('__init__.py')
out, _ = capsys.readouterr()
assert re.search(text, p.read())
assert not re.search('exists: Skipped', out)
# vim:set ft=python sw=4 et spell spelllang=en:
|
UTF-8
|
Python
| false | false | 3,487 |
py
| 53 |
test_server_templates.py
| 45 | 0.658159 | 0.657012 | 0 | 107 | 31.588785 | 78 |
haka913/programmers
| 6,313,601,944,888 |
ba5056e8cb541938e5f4463209523bc333afff21
|
0c95ba47207f85f1d0d8a500f174f459226695f3
|
/p_3_단속카메라.py
|
ddb950a9dddb6be1497ea9c8afecae97646a185a
|
[] |
no_license
|
https://github.com/haka913/programmers
|
31cf37117ce220e3992ff2f3cb5a4c59f280c457
|
2cee9ef1a77fb7fe8a1b6aa738485720efafc254
|
refs/heads/master
| 2022-12-04T14:40:00.993494 | 2020-09-02T12:48:30 | 2020-09-02T12:48:30 | 255,633,491 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def solution(routes):
routes.sort()
cam = 30000
answer = 0
for route in reversed(routes):
if cam > route[1]:
answer += 1
cam = route[0]
return answer
print(solution(routes=[[-20, 15], [-14, -5], [-18, -13], [-5, -3]]))
|
UTF-8
|
Python
| false | false | 275 |
py
| 82 |
p_3_단속카메라.py
| 82 | 0.498182 | 0.418182 | 0 | 14 | 18.642857 | 68 |
vivekshah1801/Python-RPC-Demo
| 9,904,194,634,430 |
dcf9ffd7491cd6dcd51b3ddcf3e64d46bde58296
|
08cb94509796aae2a8b0ed4fd28bd51801ef157a
|
/client.py
|
491d1c6d24268d5817a330f58dd44c52030d8f3a
|
[] |
no_license
|
https://github.com/vivekshah1801/Python-RPC-Demo
|
3f1e85fd18220aa24f11bb7485bddec91761d7a2
|
4dcd569c6c0cee634b43922f2a141a573fbfdb59
|
refs/heads/master
| 2022-12-11T05:24:50.142356 | 2020-08-31T05:04:08 | 2020-08-31T05:04:08 | 291,620,497 | 1 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import rpyc
host = "localhost"
port = 9876
r = rpyc.connect(host, port).root
print(r)
print(r.helloworld())
print("Done")
|
UTF-8
|
Python
| false | false | 125 |
py
| 3 |
client.py
| 2 | 0.688 | 0.656 | 0 | 11 | 10.454545 | 33 |
thohajati/scard-login
| 4,209,067,982,147 |
10ebeb1e39f6a5659547b4a162ca14705268be89
|
2d8b7f7f5d59005966b7802089a5f6f6ba8fee03
|
/SCardLogin_test.py
|
3f71d8099f0a52c1709977be4f30c9e767910cb0
|
[] |
no_license
|
https://github.com/thohajati/scard-login
|
b1f9d37f80e94d0f4c50b3cc9e923e4d4e262149
|
04afa1e6bc981bd5e6faf74459005949ba768c38
|
refs/heads/master
| 2021-01-19T16:42:50.552786 | 2017-08-21T07:43:44 | 2017-08-21T07:43:44 | 101,021,172 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import SCardLogin
if __name__ == '__main__':
dict = {}
print("Insert your card!")
dict = SCardLogin.get_scard_login()
print(str(dict))
|
UTF-8
|
Python
| false | false | 227 |
py
| 5 |
SCardLogin_test.py
| 3 | 0.387665 | 0.387665 | 0 | 8 | 19.625 | 43 |
devjaynemorais/tcc_gen_figures
| 5,729,486,418,889 |
3dad758e817bc40522f5e92a650c2d9e47733873
|
287df5ff9d3f3384af2c9bb33126caf1df558858
|
/bin/countstrategies.py
|
e9e1ec316fbb84703bd6186996456718ec2beaa6
|
[] |
no_license
|
https://github.com/devjaynemorais/tcc_gen_figures
|
eba58a6453c1f2a277ae0f79684c94c834f12cf5
|
07a1f7d0a060e797749926446546608a3bb14788
|
refs/heads/master
| 2022-07-07T19:22:02.479523 | 2020-05-12T00:27:55 | 2020-05-12T00:27:55 | 263,218,044 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
import sys
import pandas as pd
from importlib import reload
reload(sys)
# Class that contains all the strategies
# for counting hits
class EstrategiasDeContagem:
def __init__(self):
self.pond_df = pd.read_csv(
'data/medidas_ponderadas.csv', encoding='utf-8', sep=',')
self.pond_df = self.pond_df.set_index('PA')
self.pts_articulacao = {}
self.pts_articulacao['1-Quadril-EN'] = 'Espaco neutro do Quadril'
self.pts_articulacao['2-Quadril'] = 'Quadril'
self.pts_articulacao['3-Quadril-E'] = 'E Quadril'
self.pts_articulacao['4-Quadril-D'] = 'D Quadril'
self.pts_articulacao['5-Estomago-EN'] = 'Espaco neutro do Estomago'
self.pts_articulacao['6-Estomago'] = 'Estomago'
self.pts_articulacao['7-Estomago-E'] = 'E Estomago'
self.pts_articulacao['8-Estomago-D'] = 'D Estomago'
self.pts_articulacao['9-Peito-EN'] = 'Espaco neutro do Peito'
self.pts_articulacao['10-Peito'] = 'Peito'
self.pts_articulacao['11-Peito-E'] = 'E Peito'
self.pts_articulacao['12-Peito-D'] = 'D Peito'
self.pts_articulacao['13-Pescoco-EN'] = 'Espaco neutro do Pescoco'
self.pts_articulacao['14-Pescoco'] = 'Pescoco'
self.pts_articulacao['15-Ombro-E'] = 'E Ombro'
self.pts_articulacao['16-Ombro-D'] = 'D Ombro'
self.pts_articulacao['17-Cabeca-EN'] = 'Espaco neutro da Cabeca'
self.pts_articulacao['18-Cabeca'] = 'Cabeca'
self.pts_articulacao['19-Cabeca-E'] = 'E Cabeca'
self.pts_articulacao['20-Cabeca-D'] = 'D da Cabeca'
self.pts_articulacao['21-Acima-Cabeca'] = 'Acima da Cabeca'
self.pts_articulacao_inverse = {}
self.pts_articulacao_inverse['1-Quadril-EN'] = 'Espaco neutro do Quadril'
self.pts_articulacao_inverse['2-Quadril'] = 'Quadril'
self.pts_articulacao_inverse['3-Quadril-E'] = 'D Quadril'
self.pts_articulacao_inverse['4-Quadril-D'] = 'E Quadril'
self.pts_articulacao_inverse['5-Estomago-EN'] = 'Espaco neutro do Estomago'
self.pts_articulacao_inverse['6-Estomago'] = 'Estomago'
self.pts_articulacao_inverse['7-Estomago-E'] = 'D Estomago'
self.pts_articulacao_inverse['8-Estomago-D'] = 'E Estomago'
self.pts_articulacao_inverse['9-Peito-EN'] = 'Espaco neutro do Peito'
self.pts_articulacao_inverse['10-Peito'] = 'Peito'
self.pts_articulacao_inverse['11-Peito-E'] = 'D Peito'
self.pts_articulacao_inverse['12-Peito-D'] = 'E Peito'
self.pts_articulacao_inverse['13-Pescoco-EN'] = 'Espaco neutro do Pescoco'
self.pts_articulacao_inverse['14-Pescoco'] = 'Pescoco'
self.pts_articulacao_inverse['15-Ombro-E'] = 'D Ombro'
self.pts_articulacao_inverse['16-Ombro-D'] = 'E Ombro'
self.pts_articulacao_inverse['17-Cabeca-EN'] = 'Espaco neutro da Cabeca'
self.pts_articulacao_inverse['18-Cabeca'] = 'Cabeca'
self.pts_articulacao_inverse['19-Cabeca-E'] = 'D Cabeca'
self.pts_articulacao_inverse['20-Cabeca-D'] = 'E da Cabeca'
self.pts_articulacao_inverse['21-Acima-Cabeca'] = 'Acima da Cabeca'
# Strategy based on conditionals for counting hits
def todos(self, dataframe, args):
m_direita = str(args[0])
m_esquerda = str(args[1])
ponto_articulacao = str(args[2])
movimento = str(args[3])
nome = str(args[4])
if(movimento == 'Ambas'):
if((m_direita == self.pts_articulacao[ponto_articulacao]) and (m_esquerda == self.pts_articulacao[ponto_articulacao])):
return 1
elif ((m_direita == self.pts_articulacao_inverse[ponto_articulacao]) and (m_esquerda == self.pts_articulacao_inverse[ponto_articulacao])):
return 1
elif(m_direita == 'nan' or m_direita == 'FALHOU') or (m_esquerda == 'nan' or m_esquerda == 'FALHOU'):
return 3
else:
return 0
elif(movimento == 'Mao Esquerda'):
if (m_esquerda == self.pts_articulacao[ponto_articulacao]):
return 1
elif (m_esquerda == self.pts_articulacao_inverse[ponto_articulacao]):
return 1
elif(m_esquerda == 'nan' or m_esquerda == 'FALHOU'):
return 3
else:
return 0
elif(movimento == 'Mao Direita'):
if(m_direita == self.pts_articulacao[ponto_articulacao]):
return 1
elif (m_direita == self.pts_articulacao_inverse[ponto_articulacao]):
return 1
elif(m_direita == 'nan' or m_direita == 'FALHOU'):
return 3
else:
return 0
else:
return 4
def ponderado(self, dataframe, args):
m_direita = str(args[0])
m_esquerda = str(args[1])
ponto_articulacao = str(args[2])
movimento = str(args[3])
nome = str(args[4])
if (movimento == 'Mao Esquerda'):
if (m_esquerda != 'FALHOU'):
return self.pond_df.loc[ponto_articulacao][m_esquerda]
if (movimento == 'Mao Direita'):
if (m_direita != 'FALHOU'):
return self.pond_df.loc[ponto_articulacao][m_direita]
if (movimento == 'Ambas'):
if (m_direita != 'FALHOU' and m_esquerda != 'FALHOU'):
resultado_soma = (self.pond_df.loc[ponto_articulacao][m_esquerda] + self.pond_df.loc[ponto_articulacao][m_direita])
if (resultado_soma != 0): return resultado_soma / 2
return 0
|
UTF-8
|
Python
| false | false | 5,815 |
py
| 10 |
countstrategies.py
| 5 | 0.573001 | 0.556836 | 0 | 134 | 42.298507 | 154 |
doublevcodes/pyformatter
| 16,853,451,672,749 |
bc67a2474975ea4518c4cc1e4bb20392cf9f3b74
|
9738536bad6e3c7644ea99c0bae2ce8b41535ed1
|
/formatter/pep8/statement/comparisons.py
|
0df1d4ce8dbe777c8efb0ff47c6b71c1b91456b5
|
[] |
no_license
|
https://github.com/doublevcodes/pyformatter
|
7788518dfda49d5d974b851ba9682ac5c8850df4
|
1a061c071e66b90cc4e7f1fd2ce11b3754729ad5
|
refs/heads/master
| 2023-06-23T07:29:15.691694 | 2021-07-21T16:05:01 | 2021-07-21T16:05:01 | 387,006,646 | 0 | 0 | null | false | 2021-07-18T23:45:35 | 2021-07-17T17:59:57 | 2021-07-18T21:17:21 | 2021-07-18T23:45:35 | 11 | 0 | 0 | 1 |
Python
| false | false |
import ast
import itertools
from formatter.pep8.helpers import _reduce_module, _replace_tokens
class ComparisonParser:
def __init__(self, source: str) -> None:
self.source = source
return None
def get_comparisons(self):
for comp in self.get_none_comparisons():
yield comp
def get_none_comparisons(self):
comparisons = [comparison for comparison in self._parse()]
none_comparisons = [
comparison for comparison in comparisons
if (isinstance(comparison.left, ast.Constant) and (comparison.left.value) is None)
or (any((isinstance(comparator, ast.Constant) and (comparator.value is None)) for comparator in comparison.comparators))
]
for comparison in none_comparisons:
yield (
ast.unparse(
ComparisonParser.fix_none_comparisons(comparison)
),
comparison.lineno,
comparison.end_lineno,
comparison.col_offset,
comparison.end_col_offset
)
def _parse(self):
tree: ast.Module = ast.parse(self.source)
comparisons = list(set([node for node in _reduce_module(tree) if isinstance(node, ast.Compare)]))
return comparisons
@staticmethod
def fix_none_comparisons(comparison: ast.Compare) -> ast.Compare:
FIXED_COMP_OPS = {
ast.Eq: ast.Is(),
ast.NotEq: ast.IsNot(),
}
if comparison.ops[0] not in (ast.Is, ast.IsNot):
return ast.Compare(
left=comparison.left,
ops=[FIXED_COMP_OPS.get(type(comparison.ops[0]), comparison.ops[0])],
comparators=comparison.comparators
)
else:
return comparison
class ComparisonFormatter:
def __init__(self, filenames) -> None:
self.filenames = filenames
def format_comparisons(self) -> None:
for filename in self.filenames:
with open(filename, "r+") as file:
comparisonparser = ComparisonParser(file.read())
new_comparisons = [comp for comp in comparisonparser.get_comparisons()]
_replace_tokens(filename, new_comparisons)
|
UTF-8
|
Python
| false | false | 2,271 |
py
| 12 |
comparisons.py
| 12 | 0.590489 | 0.588727 | 0 | 68 | 32.411765 | 132 |
Diego07101985/challenge-bravo
| 15,762,529,996,549 |
9db29cfb82b4addee668d25baa3d1e729785e57d
|
4dd53b57cb310428e07ad85e96f4ef5463e6f366
|
/desafio/currency/repository.py
|
24fa0cf6e1d583d1ac243ab382c9bf0e049d8fde
|
[] |
no_license
|
https://github.com/Diego07101985/challenge-bravo
|
91177ab13899dc492b59c0c7b713dfd7a07cd0a5
|
f8df7e6663ce1ce13d1f7a670acbc85e205c788b
|
refs/heads/master
| 2022-12-01T21:10:04.241867 | 2020-08-17T20:28:12 | 2020-08-17T20:28:12 | 285,547,846 | 0 | 0 | null | true | 2020-08-17T20:24:36 | 2020-08-06T11:01:34 | 2020-08-16T23:30:13 | 2020-08-17T20:24:35 | 74 | 0 | 0 | 0 |
Python
| false | false |
from desafio.currency.model import Currency
from desafio.extensions import session_scope
class CurrencyRepository():
def get_all_currency(self):
with session_scope() as session:
return session.query(Currency).all()
def get_currency_by_simbol_currency(self, currency):
with session_scope() as session:
currency = session.query(Currency).filter(
Currency.simbol_currency == currency.simbol_currency).first()
return currency
def get_currency_by_id(self, currency):
with session_scope() as session:
currency = session.query(Currency).filter(
Currency.id == currency.id).first()
return currency
def insert(self, currency):
with session_scope() as session:
session.add(currency)
currency = session.query(Currency).filter(
Currency.simbol_currency == currency.simbol_currency).first()
print('Insert', currency)
return currency.id
def update(self, currency):
with session_scope() as session:
session.query(Currency).filter(
Currency.id == currency.id).update(
{"simbol_currency": currency.simbol_currency,
"name_description": currency.name_description}
)
update_currency = session.query(Currency).filter(
Currency.simbol_currency == currency.simbol_currency).first()
return update_currency
def delete(self, currency):
with session_scope() as session:
currency = session.query(Currency).filter(
currency.simbol_currency == currency.simbol_currency).first()
session.delete(currency)
currency = session.query(Currency).filter(
currency.simbol_currency == currency.simbol_currency).first()
return currency is None
|
UTF-8
|
Python
| false | false | 1,923 |
py
| 29 |
repository.py
| 18 | 0.615185 | 0.615185 | 0 | 49 | 38.244898 | 77 |
nelli369/Data_Scraping
| 14,018,773,298,266 |
d511a695f575d9c7b1ac1ac8d137af4f5ed6ae69
|
1c789a24715863c405e194e6e4447cbb652d0737
|
/BooksToScrape3.py
|
c4c28ff8d2c15cbe5c630029ceb74ce008a0370e
|
[] |
no_license
|
https://github.com/nelli369/Data_Scraping
|
b14522a9089d7d52f575ed761c368b5f7d546692
|
60f27562660a54ac23a25e65a253c38b404c7f2e
|
refs/heads/master
| 2022-11-14T10:08:18.497811 | 2020-07-01T14:05:13 | 2020-07-01T14:05:13 | 274,640,854 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
# -*- coding: utf-8 -*-
"""DS hmk2 pr3.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/16q0T7UI1N82myYQ1iyOyCiLJKZ5j-pM_
"""
# !pip install scrapy  (IPython/Colab shell command; run it outside plain Python)
import time
import requests
import numpy as np
import pandas as pd
from scrapy.http import TextResponse
import re
def books_scraper(url,base_url="http://books.toscrape.com/"):
page = requests.get(url)
response = TextResponse(body=page.text,url=url,encoding="utf-8")
title = response.css("h3 > a::attr(title)").extract()
price_l = response.css("p.price_color::text").extract()
price = [i.replace("A", "") for i in price_l]
book_url = response.css("h3 >a::attr(href)").extract()
picture_url = response.css("img::attr(src)").extract()
star = response.css("p[class^='star-rating']::attr(class)").extract()
star_rating = []
for i in star:
star_rating.append(i.replace("star-rating", ""))
stock =response.css("p.price_color ~ p[class^='instock']::attr(class)").extract()
instock = [i.replace("availability", " ") for i in stock]
base_url = "http://books.toscrape.com/catalogue/"
bookurl = [base_url + i for i in book_url]
picurl = [base_url + i for i in picture_url]
return pd.DataFrame({"Title":title, "Price":price, "Book_urls":bookurl, "Image_urls":picurl,"Star_Rating":star_rating,"Instock":instock})
books = []
for i in range(1,1000):
pages =books_scraper(url = f"http://books.toscrape.com/catalogue/page-{i}.html")
if pages.shape[0] == 0:
break
else:
books.append(pages)
books = pd.concat(books)
books
|
UTF-8
|
Python
| false | false | 1,635 |
py
| 9 |
BooksToScrape3.py
| 4 | 0.658104 | 0.644648 | 0 | 48 | 33.041667 | 142 |
wpovell/twitter-kov
| 1,941,325,217,831 |
0776c314498d9efea6a0a49f365091f7382fe028
|
16d8ed403af2184c9db4026cc84b9fd040beeeea
|
/TwitterKov/util/recall.py
|
67cb907bc1d69b0bbb67d387750abde4a84a5326
|
[] |
no_license
|
https://github.com/wpovell/twitter-kov
|
a51267e026aae983b0579d2f193d8c2e98f2f61b
|
371a7894783d33e68d64d67feca9379ba166017a
|
refs/heads/master
| 2016-09-06T15:13:55.346790 | 2014-10-11T04:04:57 | 2014-10-11T04:04:57 | 11,493,473 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import json
import os
import datetime
import inspect
import importlib
class recall:
def __init__(self, fname=None):
self.callingFile = inspect.stack()[1][1]
if fname is None:
self.fname = ''.join(self.callingFile.split('.')[:-1]) + '.log'
else:
self.fname = '/'.join(self.callingFile.split('/')[:-1]) + '/' + fname
def __call__(self, func):
class ret:
def __init__(self, fname, callingFile):
self.original_func = func
self.fname = fname
self.callingFile = callingFile
def original(self, *args, **kwargs):
self.original_func(*args, **kwargs)
def __call__(self, *args, **kwargs):
if os.path.isfile(self.fname):
with open(self.fname) as f:
history = json.load(f)
else:
history = []
history.insert(0, {
'cwd': os.getcwd(),
'file': self.callingFile,
'funcname': func.__name__,
'time':str(datetime.datetime.now()),
'args':args,
'kwargs':kwargs
})
with open(self.fname, 'w') as f:
json.dump(history, f)
return func(*args, **kwargs)
return ret(self.fname, self.callingFile)
if __name__ == '__main__':
from sys import argv
history = 0
if len(argv) > 1:
fileName = argv[1]
if not os.path.isfile(fileName):
print("Log file does not exist.")
exit(1)
if len(argv) > 2 and argv[2].isdigit():
history = int(argv[2])
else:
print("No log file provided.")
exit(1)
direct = os.path.dirname(fileName)
with open(fileName) as f:
data = json.load(f)
data = data[history]
rpath = ''.join(os.path.relpath(data['file'], data['cwd']).split('.')[:-1])
func = getattr(importlib.import_module(rpath.replace('/', '.')), data['funcname'])
func.original(*data['args'], **data['kwargs'])
|
UTF-8
|
Python
| false | false | 2,152 |
py
| 31 |
recall.py
| 30 | 0.486524 | 0.480019 | 0 | 61 | 34.295082 | 86 |
AlanShea/Comsci_12700-Assignments
| 5,686,536,727,560 |
4cc32122f62e64c88de7460ef3afa4d0c227dde3
|
ae6f66a3d1e667ae94e282a039bd1cb140310dbb
|
/32. Air Pollution.py
|
6a385332b1cc886a1698b2ee26a2bdad0b975ebe
|
[] |
no_license
|
https://github.com/AlanShea/Comsci_12700-Assignments
|
f76dfd8a0af82b8d0a789bb01d8e882935237679
|
1bb67f2397f114919a1f36fd6826b64d1bcdb07e
|
refs/heads/master
| 2023-02-06T01:10:25.500465 | 2020-12-19T01:02:07 | 2020-12-19T01:02:07 | 322,739,064 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
#Name: Alan Q. Shea
#Email: [email protected]
#Date: October 21, 2020
import pandas as pd
import matplotlib.pyplot as plt
fileIn = input("Enter name of input file: ")
fileOut = input("Enter name of output file: ")
data = pd.read_csv(fileIn)
avgAsthma = data.groupby("geo_entity_name").mean()["data_valuemessage"]
avgAsthma.plot.bar()
plt.gcf().subplots_adjust(bottom = 0.5)
dataGraph = plt.gcf()
dataGraph.savefig(fileOut)
|
UTF-8
|
Python
| false | false | 445 |
py
| 66 |
32. Air Pollution.py
| 51 | 0.721348 | 0.698876 | 0 | 18 | 23.722222 | 71 |
DarthThanatos/QuantumSimulator
| 11,647,951,338,360 |
2ed27490a2dab502938bf45becc51ea009a02d49
|
11e43f083afad6e84c947332bb60327a1539b836
|
/view/ProbabilitiesTable.py
|
59089e844d407cf43318d949b87be75248df3a44
|
[] |
no_license
|
https://github.com/DarthThanatos/QuantumSimulator
|
e33a226efc06e08ccda44a5ccb5727ae31dbfbab
|
9f05113b9b0c980536afb9bddca6a9596afdb459
|
refs/heads/master
| 2020-04-02T15:23:45.380210 | 2019-06-02T08:26:23 | 2019-06-02T08:26:23 | 154,566,220 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import wx
import wx.grid
class ButtonRenderer(wx.grid.GridCellRenderer):
def __init__(self, state_details_fun):
wx.grid.GridCellRenderer.__init__(self)
self.down = False
self.click_handled = False
self.__state_details_fun = state_details_fun
def Draw(self, grid, attr, dc, rect, row, col, isSelected):
if self.down:
state = wx.CONTROL_PRESSED | wx.CONTROL_SELECTED
else:
state = 0
x,y = rect.GetTopLeft()
tw, th = dc.GetTextExtent("argand")
w, h = rect.GetWidth(), rect.GetHeight()
wx.RendererNative.Get().DrawPushButton(grid, dc, rect, state)
dc.DrawText("argand", x + w/2 - tw/2, y + h/2 - th/2)
if self.down and not self.click_handled:
self.click_handled = True
self.HandleClick()
def HandleClick(self):
self.__state_details_fun()
def GetBestSize(self, grid, attr, dc, row, col):
text = grid.GetCellValue(row, col)
dc.SetFont(attr.GetFont())
w, h = dc.GetTextExtent(text)
return wx.Size(w, h)
def Clone(self):
return ButtonRenderer(self.__state_details_fun)
class StateDetailsFun:
def __init__(self, amplitude, probabilities_mediator):
self.__amplitude = amplitude
self.__probabilities_mediator = probabilities_mediator
def __call__(self):
self.__probabilities_mediator.visualise_amplitude(self.__amplitude)
COLUMNS = 4
class ProbabilitiesTable(wx.grid.Grid):
def __init__(self, parent, register_representation, probabilities_mediator):
wx.grid.Grid.__init__(self, parent)
self.__probabilities_mediator = probabilities_mediator
self.CreateGrid(len(register_representation), COLUMNS)
self.__renderer = ButtonRenderer(StateDetailsFun(0, self.__probabilities_mediator))
for i, row_representation in enumerate(register_representation):
self.__update_row(row_representation, i)
self.__setup_titles()
self.SetRowLabelSize(0)
self.SetGridLineColour(wx.WHITE)
self.GetGridWindow().Bind(wx.EVT_LEFT_DOWN, self.__on_left_down)
self.GetGridWindow().Bind(wx.EVT_LEFT_UP, self.__on_left_up)
self.Bind(wx.grid.EVT_GRID_ROW_SIZE, self.__on_resize)
self.Bind(wx.grid.EVT_GRID_COL_SIZE, self.__on_resize)
def __on_resize(self, _):
self.__probabilities_mediator.probs_table_resized()
def __update_row(self, row_representation, i):
for j in range(COLUMNS):
self.SetReadOnly(i, j, True)
if j == 4:
pass
# self.SetCellRenderer(i, 4, ButtonRenderer(StateDetailsFun(complex(row_representation[3]), self.__probabilities_mediator)))
else:
self.SetCellAlignment(i, j, wx.ALIGN_CENTRE, wx.ALIGN_CENTRE)
self.SetCellValue(i, j, row_representation[j])
def __setup_titles(self):
titles = ["val", "qubits", "prob", "ampl", ""]
for i in range(4):
self.SetColLabelValue(i, titles[i])
def __on_left_down(self, evt):
col, row = self.__hit_test_cell(evt.GetPosition().x, evt.GetPosition().y)
if isinstance(self.GetCellRenderer(row, col), ButtonRenderer):
self.GetCellRenderer(row, col).down = True
self.Refresh()
evt.Skip()
def __on_left_up(self, evt):
col, row = self.__hit_test_cell(evt.GetPosition().x, evt.GetPosition().y)
if isinstance(self.GetCellRenderer(row, col), ButtonRenderer):
self.GetCellRenderer(row, col).down = False
self.GetCellRenderer(row, col).click_handled = False
self.Refresh()
evt.Skip()
def __hit_test_cell(self, x, y):
x, y = self.CalcUnscrolledPosition(x, y)
return self.XToCol(x), self.YToRow(y)
if __name__ == '__main__':
app = wx.App(0)
frame = ProbabilitiesTable(None, [], None)
app.MainLoop()
|
UTF-8
|
Python
| false | false | 3,968 |
py
| 107 |
ProbabilitiesTable.py
| 93 | 0.612651 | 0.609375 | 0 | 110 | 35.081818 | 140 |
vishnun/python-programs
| 8,744,553,434,865 |
f85392f60b2534a32bfcdfcb32c28157917fe061
|
c7ffca464ef7bef4f64df5b471c9b6eca2c80e29
|
/string_to_int.py
|
953d298b3ab4a492cbea341cf52caabb5c24b119
|
[
"MIT"
] |
permissive
|
https://github.com/vishnun/python-programs
|
2f3f2d6fc4a8ec3268a111d134dd57bb8d32df77
|
8e27f88a2a7174a0ca10596a328c4031bedd602c
|
refs/heads/master
| 2021-01-11T18:47:25.801435 | 2017-02-05T07:10:08 | 2017-02-05T07:10:08 | 79,625,310 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
def is_valid_digit(digit):
return 0 <= digit <= 9
def str_to_int(num_str):
num_str, sign = get_sign(num_str)
value = 0
for digit in num_str:
digit = ord(digit) - ord('0')
if is_valid_digit(digit):
value = value * 10 + digit
else:
return "Not a number"
if sign == '-':
value = -value
return value
def get_sign(num_str):
sign = '+'
if num_str[0] == '-' or num_str[0] == '+':
sign = num_str[0]
num_str = num_str[1:]
return num_str, sign
|
UTF-8
|
Python
| false | false | 465 |
py
| 18 |
string_to_int.py
| 17 | 0.587097 | 0.565591 | 0 | 24 | 18.375 | 43 |
jordan-simonovski/seefood-slackbot
| 17,265,768,555,272 |
69ff4ddd51adb54ba6453f0628f39fe6527bc071
|
ce62f2bf8e6495d62223831611e6903f149f394e
|
/modules/slackMessage.py
|
5e9ae0f544fab3c547e1aea5958d1c30ac2cf30b
|
[] |
no_license
|
https://github.com/jordan-simonovski/seefood-slackbot
|
c3e57239fdbb206502509d9541d3c8a7867b1688
|
be639afc91995cd088751544547a1eb29aff36e2
|
refs/heads/master
| 2021-01-21T11:37:39.572209 | 2017-05-23T12:45:31 | 2017-05-23T12:45:31 | 91,748,549 | 6 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import requests
import json
def buildSlackMessage(imageUrl, hotdogAnswer):
slackMessage = {
"response_type": "in_channel",
"attachments": [{
"title": hotdogAnswer,
"fallback": "This is going to rollback your deployment",
"color": "#3AA3E3",
"attachment_type": "default",
"image_url": imageUrl
}]
}
return slackMessage
def notifySlackChannel(requestForm, hotdogAnswer):
myMessage = buildSlackMessage(requestForm['text'], hotdogAnswer)
return myMessage
# webHookUrl = requestForm['response_url']
# response = requests.post(webHookUrl, data=json.dumps(myMessage))
# if response.status_code == 200:
# return "ok"
# return "there was an error getting a response from slack"
|
UTF-8
|
Python
| false | false | 799 |
py
| 8 |
slackMessage.py
| 5 | 0.634543 | 0.627034 | 0 | 25 | 31 | 70 |
princeofdatamining/django_celery_management
| 16,621,523,441,809 |
b5b40b1b1f6ffacf4b82a767b5af37927d636039
|
86e7f45989e4081fdc7d1676043839560e7dec72
|
/django_celery_management/__init__.py
|
80e48593a02c51b0b61679e79c600b2c387b6c26
|
[] |
no_license
|
https://github.com/princeofdatamining/django_celery_management
|
d861f537c91202a35cc2f7bed8a14ca4473552d8
|
9ef99a4ecb30d16f5bfe3d24141be617308e4308
|
refs/heads/master
| 2020-09-20T05:34:13.911441 | 2020-06-29T07:20:08 | 2020-06-29T07:20:08 | 94,501,014 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
__version__ = "1.0.0"
default_app_config = 'django_celery_management.apps.django_celery_managementAppConfig'
|
UTF-8
|
Python
| false | false | 110 |
py
| 6 |
__init__.py
| 6 | 0.754545 | 0.727273 | 0 | 3 | 35.666667 | 86 |
peteranny/research
| 18,554,258,728,692 |
f3a0706a68857bc36d682b4b4ed2de90792c0f63
|
327ce897521087bf9a3d44dd0bc2377e85bb8425
|
/lib/factory.py
|
29606d6cf508e6094a2adea43cf88f1105275799
|
[] |
no_license
|
https://github.com/peteranny/research
|
75f1fdefe322049c3344ececfd6405fbb562cd95
|
f2674f536d0f758682a1dc195cadcd577205390a
|
refs/heads/master
| 2021-01-23T08:39:58.492377 | 2015-01-18T17:07:07 | 2015-01-18T17:07:07 | 27,134,312 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
'''
@ Ting-Yi Shih
#Usage Example
from factory import Factory
Factory(source=[1,2,3], filename="factory_out", func=lambda x:str(x), num_workers=1).run()
source: a list/an iterator/a generator of items
filename: output filename
func: function that returns a string as the result of the item
can take either 1 argument: item, or take 2 arguments: item and fparam
item: 1 out of source
fparam(optional): parameters
fparam: parameters to this function shared by all items (None by default)
if func takes exactly 1 argument, fparam must be None
if func takes exactly 2 arguments, fparam must not be None
num_workers: number of worker processes to spawn
'''
from __future__ import division
class Factory:
def __init__(this, source=[], filename="factory_out", func=lambda x:str(x), fparam=None, num_workers=1):
this.source = source
this.fout_name = filename
this.func = func
this.num_workers = num_workers
this.fparam = fparam
@staticmethod
def worker(worker_id, in_queue, func, fparam, fout): # make args a tuple
while True:
item = in_queue.get()
result = func(item, fparam) if fparam else func(item) # input=item,args output=string
fout.write("%s\n"%result)
fout.flush()
in_queue.task_done()
@staticmethod
def progressor(in_queue,total_num):
if total_num==0: return
import time,sys
while True:
Factory.pg("%5.1f%%..."%(100-in_queue.qsize()/total_num*100))
time.sleep(1)
@staticmethod
def pg(msg, br=False):
import sys
sys.stderr.write("\rFactory: "+msg+"\033[K"+("\n" if br else ""))
sys.stderr.flush()
def test(this):
try:
this.source = iter(this.source)
except:
raise Exception("source should be a iterable")
import inspect
if this.fparam is None:
if len(inspect.getargspec(this.func).args)!=1:
raise Exception("function should take exactly 1 argument: item")
if this.fparam is not None:
if len(inspect.getargspec(this.func).args)!=2:
raise Exception("function should take exactly 2 arguments:item and parameters")
if this.fout_name=="":
raise Exception("filename cannot be an empty string")
if type(this.num_workers)!=type(1) or this.num_workers<=0:
raise Exception("invalid value of num_workers")
try:
item = next(this.source)
except:
raise Exception("source is empty")
result = this.func(item, this.fparam) if this.fparam else this.func(item)
if type(result)!=type(""):
raise Exception("function should return a string")
with open("%s_part"%this.fout_name, "w") as fout:
fout.write("%s\n"%result)
fout.flush()
def run(this):
Factory.pg("configuration test...")
this.test() # check configuration
source = this.source
fout_name = this.fout_name
func = this.func
fparam = this.fparam
num_workers = this.num_workers
worker = this.worker
progressor = this.progressor
# queue settings
Factory.pg("arranging source elements...")
from multiprocessing import JoinableQueue,Process
in_queue = JoinableQueue()
for item in source:
in_queue.put(item)
# worker progressing
progressor = Process(target=progressor, args=(in_queue, in_queue.qsize()))
import time
start_time = time.time()
progressor.start()
# worker settings
fouts, workers = [], []
for w_id in xrange(num_workers):
fouts.append(open("%s_part%d"%(fout_name,w_id),"w"))
workers.append(Process(target=worker, args=(w_id, in_queue, func, fparam, fouts[w_id])))
workers[w_id].start()
# post processing
in_queue.join()
for w_id in xrange(num_workers):
workers[w_id].terminate()
progressor.terminate()
end_time = time.time()
Factory.pg("working done (%.1fs lapsed)"%(end_time - start_time), br=True)
import os
os.system("cat %s_part* > %s"%(fout_name,fout_name))
os.system("rm -f %s_part*"%(fout_name))
# useful when the output is not str (not recommended)
def obj2line(obj):
return str(obj)
def line2obj(line):
import ast
return ast.literal_eval(line)
# useful when batch processing (1 result for all instead of for each) (not recommended)
def src2chunk(source, nChunks):
import math
source = list(source)
chunk_size = int(math.ceil(len(source)/nChunks))
return [source[i:i+chunk_size] for i in xrange(0,len(source),chunk_size)]
def mergeLines(filename, res, update):
with open(filename, "r") as fin:
for line in fin:
res_prt = line2obj(line)
res = update(res,res_prt)
with open(filename, "w") as fout:
fout.write(obj2line(res))
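# A minimal, illustrative sketch of the batch-processing pattern above, combining
# src2chunk, obj2line/line2obj and mergeLines with Factory; the chunk function,
# the "batch_out" filename and the initial value 0 are assumptions made for the example.
def _batch_example():
    def count_chunk(chunk):
        # one partial result per chunk, serialized to a single output line
        return obj2line(sum(chunk))
    # split the items into 4 chunks so each worker processes a batch instead of single items
    Factory(source=src2chunk(range(100), 4), filename="batch_out", func=count_chunk, num_workers=2).run()
    # fold the per-chunk partial results written to "batch_out" back into one total
    mergeLines("batch_out", 0, update=lambda res, part: res + part)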
if __name__ == "__main__":
def simple(x,fparam):
import time
time.sleep(fparam)
return str(x)
Factory(source=range(10), filename="factory_out", func=simple, fparam=3, num_workers=5).run()
|
UTF-8
|
Python
| false | false | 5,247 |
py
| 38 |
factory.py
| 22 | 0.611016 | 0.603202 | 0 | 150 | 33.973333 | 108 |
ruyfreire/python-introducao
| 8,504,035,253,532 |
5168794c689ad3c0d48c2b75fa24d232a8267be6
|
e2adbbc7512933027100747db1db79653b45a716
|
/sessao4/02_metodos/get_set.py
|
2589c60e6f9d39bb760a25e32024f1142d3103e3
|
[] |
no_license
|
https://github.com/ruyfreire/python-introducao
|
128dfeb24f7981a3b16891bf8c421e7a2767fee9
|
9088d216de7fe940f523b2a3c817fcc340aba730
|
refs/heads/master
| 2023-05-29T10:13:58.801677 | 2021-04-18T18:29:15 | 2021-04-18T18:29:15 | 277,381,147 | 0 | 0 | null | false | 2021-06-09T00:41:30 | 2020-07-05T20:49:51 | 2021-06-09T00:41:07 | 2021-06-08T23:21:50 | 11,942 | 0 | 0 | 1 |
Python
| false | false |
"""
Getter and Setter Methods
- Getter
  Getter methods return the value of an instance attribute
  and are decorated with '@property'
  ex:
  class MinhaClasse:
      def __init__(self, nome):
          self.nome = nome
      @property
      def nome(self):
          return self._nome
- Setter
  Setter methods assign the value of an instance attribute
  and are decorated with '@<attribute_name>.setter'
  ex:
  class MinhaClasse:
      def __init__(self, nome):
          self.nome = nome
      @nome.setter
      def nome(self, valor):
          self._nome = valor
"""
from random import randint
class Produto:
def __init__(self, nome):
self._nome = nome
@property
def nome(self):
id = randint(100, 999)
return f'{id}_{self._nome}'
@nome.setter
def nome(self, valor):
self._nome = valor
print('\n##### Inserting the value with the setter #####')
nome = 'Caneca'
print(nome)
p1 = Produto(nome)
print('\n##### Getting the value WITHOUT the getter #####')
print(p1._nome)
print('\n##### Getting the value WITH the getter #####')
print(p1.nome)
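# A minimal, illustrative follow-up (an assumed example, not in the original file):
# assigning to the attribute invokes the @nome.setter defined above; 'Copo' is just a sample value.
print('\n##### Updating the value WITH the setter #####')
p1.nome = 'Copo'
print(p1.nome)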
|
UTF-8
|
Python
| false | false | 1,054 |
py
| 160 |
get_set.py
| 128 | 0.614286 | 0.605714 | 0 | 54 | 18.444444 | 59 |
FrauElster/ocr_test
| 5,712,306,548,340 |
3d7035159fe5fc03521eb4d9a4dda4cbb640b702
|
fe0d709d75885c2e0250c1fb1dd18b1945d7b7c9
|
/ocr-test/create_pdf.py
|
89e7a1824ace649682ae5750724622226c5f280e
|
[] |
no_license
|
https://github.com/FrauElster/ocr_test
|
2845ab0c0d73d274961f2d46c28dfe242d6a15b7
|
8d0cd2df411fd45f18dbee1d8ab1c02ce352d364
|
refs/heads/master
| 2022-02-13T20:44:14.378611 | 2019-07-31T10:50:43 | 2019-07-31T10:50:43 | 196,885,809 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
import os
from fpdf import FPDF
import glob
from typing import List
from .FileHandler import FileHandler
FONTS = ['Courier', 'Arial', 'Times']
def main(min_font_size: int, max_font_size: int):
assert min_font_size <= max_font_size
lorem: str = FileHandler.load_file(FileHandler.get_path('../lorem.txt'))
create_pdfs(min_font_size, max_font_size, lorem)
FileHandler.delete_with_ending('../fonts', 'pkl')
def create_pdfs(min_font_size: int, max_font_size: int, text: str):
special_fonts = get_special_fonts()
for font_size in range(min_font_size, max_font_size + 1):
for font in FONTS:
pdf = FPDF('P', 'mm', 'A4')
pdf.set_font(font)
make_pdf(pdf, font, font_size, text)
for special_font in special_fonts:
pdf = FPDF('P', 'mm', 'A4')
pdf.add_font(family=special_font, fname=special_font, uni=True)
print(f'Special Font: {special_font}')
pdf.set_font(special_font)
font_name = special_font.split('/')[-1]
font_name = font_name.split('.')[0]
make_pdf(pdf, font_name, font_size, text)
def get_special_fonts():
os.environ['FPDF_FONTPATH'] = FileHandler.get_path('../fonts')
os.environ['FPDF_CACHE_MODE'] = FileHandler.get_path('1')
special_fonts: List[str] = []
for filename in glob.glob(os.path.join(FileHandler.get_path('../fonts'), '*.ttf')):
print(f'DEBUG: {filename}')
special_fonts.append(filename)
return special_fonts
def make_pdf(pdf, font_name: str, font_size: int, lorem: str):
dir_path = FileHandler.get_path('../out_create')
if not os.path.isdir(dir_path):
os.mkdir(dir_path)
title = f'{font_name}_{font_size}.pdf'
pdf.add_page()
pdf.set_font_size(font_size)
pdf.multi_cell(150, 10, txt=lorem)
pdf.output(f'{dir_path}/{title}')
print(f'Created {title}')
|
UTF-8
|
Python
| false | false | 2,003 |
py
| 186 |
create_pdf.py
| 5 | 0.614578 | 0.609086 | 0 | 59 | 32.949153 | 92 |
smautner/pig
| 18,837,726,560,357 |
55452bd17fc5f1fac5f55683f85acdbf577409bd
|
9419966d7debad3ed14738c90ad5020fe59713cf
|
/yoda/nanaman/milad_data.py
|
f70b7d8b556f4de4791357c364d6239e57fb1526
|
[] |
no_license
|
https://github.com/smautner/pig
|
378ef98a8d90e60a8e304536495e5d9fc4664a2f
|
784877db54d078b4ccca839ef9b3d77c3b032508
|
refs/heads/master
| 2023-08-30T23:44:05.986116 | 2023-08-16T12:10:21 | 2023-08-16T12:10:21 | 209,278,646 | 1 | 2 | null | false | 2020-11-09T13:26:13 | 2019-09-18T10:13:27 | 2020-11-04T14:41:59 | 2020-11-09T13:26:12 | 4,316 | 1 | 1 | 0 |
Jupyter Notebook
| false | false |
from lmz import Map,Zip,Filter,Grouper,Range,Transpose
import yoda.filein
from yoda import filein, alignment, ali2graph , simpleMl
from ubergauss import tools as ut
import eden.graph as eg
def rfamOme_labeldistribution():
'''just looks at the distribution of labels in the rfamome dataset'''
import glob
from collections import Counter
f = glob.glob(f'../../rfamome/*.stk')
m = map(alignment.grepfamily, f) # the name is in the filename
c = Counter(m)
print(sum([v for v in c.values() if v > 2]))
print(c)
def runbaseline():
alis = filein.loadrfamome(path = f'../../rfamome')
alis = filein.addstructure(alis) # a.struct
    alis = filein.addcov(alis) # a.rscape [(start, stop, e-values)]
alis = yoda.filein.process_cov(alis, debug = False)
graphs = Map(ali2graph.scclust,alis) # not done!
X = eg.vectorize(graphs)
y = [a.label for a in alis]
# 1nn check
print(f"{simpleMl.knn_accuracy(X,y,n_neighbors = 1)=}")
print(f"{simpleMl.knn_f1(X,y,n_neighbors = 1,cv_strati_splits = 3)=}")
# cv kmeans clustering
print(f"{simpleMl.kmeans_ari(X,y,k=28)=}")
# loguru!
# so.grpint pos RNA
# https://github.com/fabriziocosta/EDeN/blob/master/eden/display/__init__.py
# vectorizer should have many options... why are the fake nodes decreasing performance?
# especially removing the fakenodes is improving performance, with the so.rna stuff
# i should be able to debug and see if i implemented it correctly
# experiment a bit with this dataset...
# then build the rfam tree or cluster the clan ones!
|
UTF-8
|
Python
| false | false | 1,609 |
py
| 38 |
milad_data.py
| 31 | 0.675575 | 0.66936 | 0 | 48 | 32.520833 | 91 |
payerhans/patienten
| 5,987,184,412,324 |
f79cb14a6318a44e33f4ada2c9ca0c241b264a1d
|
66576843eff9f518131c27cd9c92504a0b6dbd5b
|
/vvt/apps.py
|
7d47cbdee7d8951e8ecfeb0afdebf24ec27234ac
|
[] |
no_license
|
https://github.com/payerhans/patienten
|
431049d99e295e9d59f9516440fcdad46ae9bb9b
|
3c490762a9dd666a35ba6619713780dc6515990f
|
refs/heads/master
| 2020-03-11T03:14:12.796823 | 2018-05-28T14:41:23 | 2018-05-28T14:41:23 | 129,741,162 | 0 | 0 | null | null | null | null | null | null | null | null | null | null | null | null | null |
from django.apps import AppConfig
class VvtConfig(AppConfig):
name = 'vvt'
|
UTF-8
|
Python
| false | false | 81 |
py
| 36 |
apps.py
| 15 | 0.728395 | 0.728395 | 0 | 5 | 15.2 | 33 |