| hexsha (string) | size (int64) | ext (string) | lang (string) | max_stars_repo_path (string) | max_stars_repo_name (string) | max_stars_repo_head_hexsha (string) | max_stars_repo_licenses (list) | max_stars_count (int64) | max_stars_repo_stars_event_min_datetime (string) | max_stars_repo_stars_event_max_datetime (string) | max_issues_repo_path (string) | max_issues_repo_name (string) | max_issues_repo_head_hexsha (string) | max_issues_repo_licenses (list) | max_issues_count (int64) | max_issues_repo_issues_event_min_datetime (string) | max_issues_repo_issues_event_max_datetime (string) | max_forks_repo_path (string) | max_forks_repo_name (string) | max_forks_repo_head_hexsha (string) | max_forks_repo_licenses (list) | max_forks_count (int64) | max_forks_repo_forks_event_min_datetime (string) | max_forks_repo_forks_event_max_datetime (string) | content (string) | avg_line_length (float64) | max_line_length (int64) | alphanum_fraction (float64) | qsc_code_num_words_quality_signal (int64) | qsc_code_num_chars_quality_signal (float64) | qsc_code_mean_word_length_quality_signal (float64) | qsc_code_frac_words_unique_quality_signal (float64) | qsc_code_frac_chars_top_2grams_quality_signal (float64) | qsc_code_frac_chars_top_3grams_quality_signal (float64) | qsc_code_frac_chars_top_4grams_quality_signal (float64) | qsc_code_frac_chars_dupe_5grams_quality_signal (float64) | qsc_code_frac_chars_dupe_6grams_quality_signal (float64) | qsc_code_frac_chars_dupe_7grams_quality_signal (float64) | qsc_code_frac_chars_dupe_8grams_quality_signal (float64) | qsc_code_frac_chars_dupe_9grams_quality_signal (float64) | qsc_code_frac_chars_dupe_10grams_quality_signal (float64) | qsc_code_frac_chars_replacement_symbols_quality_signal (float64) | qsc_code_frac_chars_digital_quality_signal (float64) | qsc_code_frac_chars_whitespace_quality_signal (float64) | qsc_code_size_file_byte_quality_signal (float64) | qsc_code_num_lines_quality_signal (float64) | qsc_code_num_chars_line_max_quality_signal (float64) | qsc_code_num_chars_line_mean_quality_signal (float64) | qsc_code_frac_chars_alphabet_quality_signal (float64) | qsc_code_frac_chars_comments_quality_signal (float64) | qsc_code_cate_xml_start_quality_signal (float64) | qsc_code_frac_lines_dupe_lines_quality_signal (float64) | qsc_code_cate_autogen_quality_signal (float64) | qsc_code_frac_lines_long_string_quality_signal (float64) | qsc_code_frac_chars_string_length_quality_signal (float64) | qsc_code_frac_chars_long_word_length_quality_signal (float64) | qsc_code_frac_lines_string_concat_quality_signal (float64) | qsc_code_cate_encoded_data_quality_signal (float64) | qsc_code_frac_chars_hex_words_quality_signal (float64) | qsc_code_frac_lines_prompt_comments_quality_signal (float64) | qsc_code_frac_lines_assert_quality_signal (float64) | qsc_codepython_cate_ast_quality_signal (float64) | qsc_codepython_frac_lines_func_ratio_quality_signal (float64) | qsc_codepython_cate_var_zero_quality_signal (bool) | qsc_codepython_frac_lines_pass_quality_signal (float64) | qsc_codepython_frac_lines_import_quality_signal (float64) | qsc_codepython_frac_lines_simplefunc_quality_signal (float64) | qsc_codepython_score_lines_no_logic_quality_signal (float64) | qsc_codepython_frac_lines_print_quality_signal (float64) | qsc_code_num_words (int64) | qsc_code_num_chars (int64) | qsc_code_mean_word_length (int64) | qsc_code_frac_words_unique (null) | qsc_code_frac_chars_top_2grams (int64) | qsc_code_frac_chars_top_3grams (int64) | qsc_code_frac_chars_top_4grams (int64) | qsc_code_frac_chars_dupe_5grams (int64) | qsc_code_frac_chars_dupe_6grams (int64) | qsc_code_frac_chars_dupe_7grams (int64) | qsc_code_frac_chars_dupe_8grams (int64) | qsc_code_frac_chars_dupe_9grams (int64) | qsc_code_frac_chars_dupe_10grams (int64) | qsc_code_frac_chars_replacement_symbols (int64) | qsc_code_frac_chars_digital (int64) | qsc_code_frac_chars_whitespace (int64) | qsc_code_size_file_byte (int64) | qsc_code_num_lines (int64) | qsc_code_num_chars_line_max (int64) | qsc_code_num_chars_line_mean (int64) | qsc_code_frac_chars_alphabet (int64) | qsc_code_frac_chars_comments (int64) | qsc_code_cate_xml_start (int64) | qsc_code_frac_lines_dupe_lines (int64) | qsc_code_cate_autogen (int64) | qsc_code_frac_lines_long_string (int64) | qsc_code_frac_chars_string_length (int64) | qsc_code_frac_chars_long_word_length (int64) | qsc_code_frac_lines_string_concat (null) | qsc_code_cate_encoded_data (int64) | qsc_code_frac_chars_hex_words (int64) | qsc_code_frac_lines_prompt_comments (int64) | qsc_code_frac_lines_assert (int64) | qsc_codepython_cate_ast (int64) | qsc_codepython_frac_lines_func_ratio (int64) | qsc_codepython_cate_var_zero (int64) | qsc_codepython_frac_lines_pass (int64) | qsc_codepython_frac_lines_import (int64) | qsc_codepython_frac_lines_simplefunc (int64) | qsc_codepython_score_lines_no_logic (int64) | qsc_codepython_frac_lines_print (int64) | effective (string) | hits (int64) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0a96a8a9570ed3b24a4bfee94944da9262d1bde3
| 449
|
py
|
Python
|
setup.py
|
nopipifish/bert4keras
|
d8fd065b9b74b8a82b381b7183f9934422e4caa9
|
[
"Apache-2.0"
] | 1
|
2020-09-09T02:34:28.000Z
|
2020-09-09T02:34:28.000Z
|
setup.py
|
nopipifish/bert4keras
|
d8fd065b9b74b8a82b381b7183f9934422e4caa9
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
nopipifish/bert4keras
|
d8fd065b9b74b8a82b381b7183f9934422e4caa9
|
[
"Apache-2.0"
] | null | null | null |
#! -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='bert4keras',
version='0.8.4',
description='an elegant bert4keras',
long_description='bert4keras: https://github.com/bojone/bert4keras',
license='Apache License 2.0',
url='https://github.com/bojone/bert4keras',
author='bojone',
author_email='[email protected]',
install_requires=['keras<=2.3.1'],
packages=find_packages()
)
| 26.411765
| 72
| 0.674833
| 56
| 449
| 5.321429
| 0.642857
| 0.080537
| 0.09396
| 0.134228
| 0.201342
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036842
| 0.153675
| 449
| 16
| 73
| 28.0625
| 0.747368
| 0.051225
| 0
| 0
| 0
| 0
| 0.411765
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.076923
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0a98cfd9f20dfc0c1b38e64c743a29230c7a8c4f
| 195
|
py
|
Python
|
whoPay.py
|
susurigirl/susuri
|
cec96cc9abd5a25762e15db27c17e70a95ae874c
|
[
"MIT"
] | null | null | null |
whoPay.py
|
susurigirl/susuri
|
cec96cc9abd5a25762e15db27c17e70a95ae874c
|
[
"MIT"
] | null | null | null |
whoPay.py
|
susurigirl/susuri
|
cec96cc9abd5a25762e15db27c17e70a95ae874c
|
[
"MIT"
] | null | null | null |
import random
names_string = input("Enter the names of the friends taking part in the bet, separated by commas (,).\n")
names = names_string.split(",")
print(names)
n = random.randint(0, len(names) - 1)
print(f"์ค๋ ์ปคํผ๋ {names[n]}๊ฐ ์ฉ๋๋ค!")
| 19.5
| 64
| 0.676923
| 35
| 195
| 3.714286
| 0.685714
| 0.169231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005952
| 0.138462
| 195
| 9
| 65
| 21.666667
| 0.767857
| 0
| 0
| 0
| 0
| 0
| 0.328205
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0a99a93e656914b21bfd27861c1447d786a91bee
| 2,929
|
py
|
Python
|
MicroPython_BUILD/components/micropython/esp32/modules_examples/mqtt_example.py
|
FlorianPoot/MicroPython_ESP32_psRAM_LoBo
|
fff2e193d064effe36a7d456050faa78fe6280a8
|
[
"Apache-2.0"
] | 838
|
2017-07-14T10:08:13.000Z
|
2022-03-22T22:09:14.000Z
|
MicroPython_BUILD/components/micropython/esp32/modules_examples/mqtt_example.py
|
FlorianPoot/MicroPython_ESP32_psRAM_LoBo
|
fff2e193d064effe36a7d456050faa78fe6280a8
|
[
"Apache-2.0"
] | 395
|
2017-08-18T15:56:17.000Z
|
2022-03-20T11:28:23.000Z
|
MicroPython_BUILD/components/micropython/esp32/modules_examples/mqtt_example.py
|
FlorianPoot/MicroPython_ESP32_psRAM_LoBo
|
fff2e193d064effe36a7d456050faa78fe6280a8
|
[
"Apache-2.0"
] | 349
|
2017-09-02T18:00:23.000Z
|
2022-03-31T23:26:22.000Z
|
import network
def conncb(task):
print("[{}] Connected".format(task))
def disconncb(task):
print("[{}] Disconnected".format(task))
def subscb(task):
print("[{}] Subscribed".format(task))
def pubcb(pub):
print("[{}] Published: {}".format(pub[0], pub[1]))
def datacb(msg):
print("[{}] Data arrived from topic: {}, Message:\n".format(msg[0], msg[1]), msg[2])
mqtt = network.mqtt("loboris", "mqtt://loboris.eu", user="wifimcu", password="wifimculobo", cleansession=True, connected_cb=conncb, disconnected_cb=disconncb, subscribed_cb=subscb, published_cb=pubcb, data_cb=datacb)
# secure connection requires more memory and may not work
# mqtts = network.mqtt("eclipse", "mqtts//iot.eclipse.org", cleansession=True, connected_cb=conncb, disconnected_cb=disconncb, subscribed_cb=subscb, published_cb=pubcb, data_cb=datacb)
# wsmqtt = network.mqtt("eclipse", "ws://iot.eclipse.org:80/ws", cleansession=True, data_cb=datacb)
mqtt.start()
#mqtt.config(lwt_topic='status', lwt_msg='Disconnected')
'''
# Wait until status is: (1, 'Connected')
mqtt.subscribe('test')
mqtt.publish('test', 'Hi from Micropython')
mqtt.stop()
'''
# ==================
# ThingSpeak example
# ==================
import network
import utime  # needed for utime.sleep_ms() in the polling loop below
def datacb(msg):
print("[{}] Data arrived from topic: {}, Message:\n".format(msg[0], msg[1]), msg[2])
thing = network.mqtt("thingspeak", "mqtt://mqtt.thingspeak.com", user="anyName", password="ThingSpeakMQTTid", cleansession=True, data_cb=datacb)
# or secure connection
#thing = network.mqtt("thingspeak", "mqtts://mqtt.thingspeak.com", user="anyName", password="ThingSpeakMQTTid", cleansession=True, data_cb=datacb)
thingspeakChannelId = "123456" # enter Thingspeak Channel ID
thingspeakChannelWriteApiKey = "ThingspeakWriteAPIKey" # EDIT - enter Thingspeak Write API Key
thingspeakFieldNo = 1
thingSpeakChannelFormat = "json"
pubchan = "channels/{:s}/publish/{:s}".format(thingspeakChannelId, thingspeakChannelWriteApiKey)
pubfield = "channels/{:s}/publish/fields/field{}/{:s}".format(thingspeakChannelId, thingspeakFieldNo, thingspeakChannelWriteApiKey)
subchan = "channels/{:s}/subscribe/{:s}/{:s}".format(thingspeakChannelId, thingSpeakChannelFormat, thingspeakChannelWriteApiKey)
subfield = "channels/{:s}/subscribe/fields/field{}/{:s}".format(thingspeakChannelId, thingspeakFieldNo, thingspeakChannelWriteApiKey)
thing.start()
tmo = 0
while thing.status()[0] != 2:
utime.sleep_ms(100)
tmo += 1
if tmo > 80:
print("Not connected")
break
# subscribe to channel
thing.subscribe(subchan)
# subscribe to field
thing.subscribe(subfield)
# publish to channel
# Payload can include any of those fields separated by ';':
# "field1=value;field2=value;...;field8=value;latitude=value;longitude=value;elevation=value;status=value"
thing.publish(pubchan, "field1=25.2;status=On line")
# Publish to field
thing.publish(pubfield, "24.5")
| 33.284091
| 216
| 0.712188
| 347
| 2,929
| 5.965418
| 0.357349
| 0.02657
| 0.028986
| 0.031884
| 0.329469
| 0.315942
| 0.315942
| 0.236715
| 0.236715
| 0.236715
| 0
| 0.01428
| 0.115398
| 2,929
| 87
| 217
| 33.666667
| 0.784639
| 0.313418
| 0
| 0.162162
| 0
| 0
| 0.252552
| 0.11338
| 0
| 0
| 0
| 0
| 0
| 1
| 0.162162
| false
| 0.054054
| 0.054054
| 0
| 0.216216
| 0.189189
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
0a9a47e3f3a1f529a8e26eeea21042cb90395afd
| 585
|
py
|
Python
|
mlb/game/migrations/0009_game_game_type.py
|
atadams/mlb
|
633b2eb53e5647c64a48c31ca68a50714483fb1d
|
[
"MIT"
] | null | null | null |
mlb/game/migrations/0009_game_game_type.py
|
atadams/mlb
|
633b2eb53e5647c64a48c31ca68a50714483fb1d
|
[
"MIT"
] | null | null | null |
mlb/game/migrations/0009_game_game_type.py
|
atadams/mlb
|
633b2eb53e5647c64a48c31ca68a50714483fb1d
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.8 on 2019-12-14 19:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('game', '0008_auto_20191214_1019'),
]
operations = [
migrations.AddField(
model_name='game',
name='game_type',
field=models.CharField(choices=[('E', 'Exhibition'), ('S', 'Spring Training'), ('R', 'Regular Season'), ('F', 'Wild Card'), ('D', 'Divisional Series'), ('L', 'League Championship Series'), ('W', 'World Series')], default='R', max_length=30),
),
]
| 30.789474
| 253
| 0.589744
| 67
| 585
| 5.059701
| 0.820896
| 0.047198
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073009
| 0.22735
| 585
| 18
| 254
| 32.5
| 0.676991
| 0.076923
| 0
| 0
| 1
| 0
| 0.280669
| 0.042751
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0a9a9f93de2f3ba2e7d9c2affc936358894ee511
| 36,217
|
py
|
Python
|
backend/main/chapters/c06_lists.py
|
Vman45/futurecoder
|
0f4abc0ab00ec473e6cf6f51d534ef2deb26a086
|
[
"MIT"
] | null | null | null |
backend/main/chapters/c06_lists.py
|
Vman45/futurecoder
|
0f4abc0ab00ec473e6cf6f51d534ef2deb26a086
|
[
"MIT"
] | 1
|
2022-02-28T01:35:27.000Z
|
2022-02-28T01:35:27.000Z
|
backend/main/chapters/c06_lists.py
|
suchoudh/futurecoder
|
0f4abc0ab00ec473e6cf6f51d534ef2deb26a086
|
[
"MIT"
] | null | null | null |
# flake8: NOQA E501
import ast
import random
from textwrap import dedent
from typing import List
from main.exercises import generate_list, generate_string
from main.text import ExerciseStep, MessageStep, Page, Step, VerbatimStep, search_ast
from main.utils import returns_stdout
class IntroducingLists(Page):
class first_list(VerbatimStep):
"""
It's time to learn about a powerful new type of value called lists. Here's an example:
__program_indented__
"""
def program(self):
words = ['This', 'is', 'a', 'list']
for word in words:
print(word)
class can_contain_anything(VerbatimStep):
"""
A list is a *sequence* (an ordered collection/container) of any number of values.
The values are often referred to as *elements*.
They can be anything: numbers, strings, booleans, even lists! They can also be a mixture of types.
To create a list directly, like above:
1. Write some square brackets: `[]`
2. If you don't want an empty list, write some expressions inside to be the elements.
3. Put commas (`,`) between elements to separate them.
Here's another example of making a list:
__program_indented__
"""
def program(self):
x = 1
things = ['Hello', x, x + 3]
print(things)
class numbers_sum(VerbatimStep):
"""
As you saw above, lists are *iterable*, meaning you can iterate over them with a `for loop`.
Here's a program that adds up all the numbers in a list:
__program_indented__
"""
def program(self):
numbers = [3, 1, 4, 1, 5, 9]
total = 0
for number in numbers:
total += number
print(total)
class strings_sum(ExerciseStep):
"""
Now modify the program so that it can add up a list of strings instead of numbers.
For example, given:
words = ['This', 'is', 'a', 'list']
it should print:
Thisisalist
"""
hints = """
This is very similar to the exercises you've done building up strings character by character.
The solution is very similar to the program that adds numbers.
In fact, what happens if you try running that program with a list of strings?
The problem is that `total` starts out as 0. You can't add 0 to a string because numbers and strings are incompatible.
Is there a similar concept among strings to 0? A blank initial value?
"""
@returns_stdout
def solution(self, words: List[str]):
total = ''
for word in words:
total += word
print(total)
tests = [
(['This', 'is', 'a', 'list'], 'Thisisalist'),
(['The', 'quick', 'brown', 'fox', 'jumps'], 'Thequickbrownfoxjumps'),
]
class double_numbers(ExerciseStep):
"""
Optional bonus challenge: extend the program to insert a separator string *between* each word.
For example, given
words = ['This', 'is', 'a', 'list']
separator = ' - '
it would output:
This - is - a - list
Lists and strings have a lot in common.
For example, you can add two lists to combine them together into a new list.
You can also create an empty list that has no elements.
Check for yourself:
numbers = [1, 2] + [3, 4]
print(numbers)
new_numbers = []
new_numbers += numbers
new_numbers += [5]
print(new_numbers)
With that knowledge, write a program which takes a list of numbers
and prints a list where each number has been doubled. For example, given:
numbers = [3, 1, 4, 1, 5, 9, 2, 6, 5]
it would print:
[6, 2, 8, 2, 10, 18, 4, 12, 10]
"""
hints = """
Remember that you can multiply numbers using `*`.
This program is structurally very similar to the programs you've written to build up strings character by character.
Make a new list, and then build it up element by element in a for loop.
Start with an empty list.
You can make a list with one element `x` by just writing `[x]`.
You can add an element to a list by adding a list containing one element.
"""
@returns_stdout
def solution(self, numbers: List[int]):
double = []
for number in numbers:
double += [number * 2]
print(double)
tests = [
([3, 1, 4, 1, 5, 9, 2, 6, 5], [6, 2, 8, 2, 10, 18, 4, 12, 10]),
([0, 1, 2, 3], [0, 2, 4, 6]),
]
class filter_numbers(ExerciseStep):
"""
Great!
When you want to add a single element to the end of a list, instead of:
some_list += [element]
it's actually more common to write:
some_list.append(element)
There isn't really a big difference between these, but `.append`
will be more familiar and readable to most people.
Now use `.append` to write a program which prints a list containing only the numbers bigger than 5.
For example, given:
numbers = [3, 1, 4, 1, 5, 9, 2, 6, 5]
it would print:
[9, 6]
"""
hints = """
This is very similar to the previous exercise.
The difference is that sometimes you should skip appending to the new list.
Use an `if` statement.
Use a comparison operator to test if a number is big enough to add.
"""
# TODO enforce not using +=
@returns_stdout
def solution(self, numbers: List[int]):
big_numbers = []
for number in numbers:
if number > 5:
big_numbers.append(number)
print(big_numbers)
tests = [
([3, 1, 4, 1, 5, 9, 2, 6, 5], [9, 6]),
([0, 2, 4, 6, 8, 10], [6, 8, 10]),
]
final_text = """
Fantastic! We're making great progress.
"""
class UsingBreak(Page):
title = "Using `break` to end a loop early"
class list_contains_exercise(ExerciseStep):
"""
Exercise: write a program which takes a list and a value and checks
if the list contains the value. For example, given:
things = ['This', 'is', 'a', 'list']
thing_to_find = 'is'
it should print `True`, but for
thing_to_find = 'other'
it should print `False`.
"""
hints = """
You will need a loop.
You will need an `if` statement.
You will need a comparison operator.
Specifically `==`.
You need a boolean variable that you print at the end.
If you find the element in the list you should set that variable to `True`.
Once you've found the element, you can't unfind it.
That means that once you set the variable to `True`, it should never be set to anything else after that.
Don't use an `else`.
There is no reason to ever set the variable to `False` inside the loop.
"""
@returns_stdout
def solution(self, things, thing_to_find):
found = False
for thing in things:
if thing == thing_to_find:
found = True
print(found)
tests = [
((['This', 'is', 'a', 'list'], 'is'), True),
((['This', 'is', 'a', 'list'], 'other'), False),
(([1, 2, 3, 4], 1), True),
(([1, 2, 3, 4], 0), False),
]
@classmethod
def generate_inputs(cls):
contained = random.choice([True, False])
things = generate_list(int)
if contained:
thing_to_find = random.choice(things)
else:
thing_to_find = random.choice([
min(things) - 1,
max(things) + 1,
])
return dict(
things=things,
thing_to_find=thing_to_find,
)
final_text = """
Nice!
A typical solution looks something like this:
found = False
for thing in things:
if thing == thing_to_find:
found = True
print(found)
Your solution is probably similar. It's fine, but it's a bit inefficient.
That's because it'll loop over the entire list even if it finds the element at the beginning.
You can stop any loop using a `break` statement, like so:
for thing in things:
if thing == thing_to_find:
found = True
break
This is just as correct but skips unnecessary iterations and checks once it finds the element.
You can use snoop to see the difference.
"""
class GettingElementsAtPosition(Page):
title = "Getting Elements at a Position"
class introducing_subscripting(VerbatimStep):
"""
Looping is great, but often you just want to retrieve a single element from the list at a known position.
Here's how:
__program_indented__
"""
def program(self):
words = ['This', 'is', 'a', 'list']
print(words[0])
print(words[1])
print(words[2])
print(words[3])
class index_error(Step):
"""
In general, you can get the element at the position `i` with `words[i]`. The operation is called *subscripting* or *indexing*, and the position is called the *index*.
You've probably noticed that the first index is 0, not 1. In programming, counting starts at 0. It seems weird, but that's how most programming languages do it, and it's generally agreed to be better.
This also means that the last index in this list of 4 elements is 3. What happens if you try getting an index greater than that?
"""
program = "words[4]"
def check(self):
return "IndexError" in self.result
class introducing_len_and_range(VerbatimStep):
"""
There you go. `words[4]` and beyond don't exist, so trying that will give you an error.
By the way, you can get the number of elements in a list (commonly called the *length*) using `len(words)`.
That means that the last valid index of the list is `len(words) - 1`, so the last element is `words[len(words) - 1]`. Try these for yourself.
So in general, the valid indices are:
[0, 1, 2, ..., len(words) - 2, len(words) - 1]
There's a handy built in function to give you these values, called `range`:
__program_indented__
"""
def program(self):
for i in range(10):
print(i)
class range_len(VerbatimStep):
"""
`range(n)` is similar to the list `[0, 1, 2, ..., n - 2, n - 1]`.
This gives us an alternative way to loop over a list:
__program_indented__
"""
def program(self):
words = ['This', 'is', 'a', 'list']
for index in range(len(words)):
print(index)
print(words[index])
class index_exercise(ExerciseStep):
"""
Let's get some exercise! Given a list `things` and a value `to_find`,
print the first index of `to_find` in the list, i.e. the lowest number `i` such that
`things[i]` is `to_find`. For example, for
things = ['on', 'the', 'way', 'to', 'the', 'store']
to_find = 'the'
your program should print `1`.
You can assume that `to_find` appears at least once.
"""
hints = """
You will need to look at all the possible indices of `things` and check which one is the answer.
To look at all possible indices, you will need a loop over `range(len(things))`.
To check if an index is the answer, you will need to use:
- `if`
- the index in a subscript
- `==`
Since you're looking for the first index, you need to stop the loop once you find one.
You learned how to stop a loop in the middle recently.
You need to use `break`.
"""
class all_indices(MessageStep, ExerciseStep):
"""
You're almost there! However, this prints all the indices,
not just the first one.
"""
@returns_stdout
def solution(self, things, to_find):
for i in range(len(things)):
if to_find == things[i]:
print(i)
tests = [
((['on', 'the', 'way', 'to', 'the', 'store'], 'the'), "1\n4"),
(([0, 1, 2, 3, 4, 5, 6, 6], 6), "6\n7"),
]
class last_index(MessageStep, ExerciseStep):
"""
You're almost there! However, this prints the *last* index,
not the first one.
"""
@returns_stdout
def solution(self, things, to_find):
answer = None
for i in range(len(things)):
if to_find == things[i]:
answer = i
print(answer)
tests = [
((['on', 'the', 'way', 'to', 'the', 'store'], 'the'), 4),
(([0, 1, 2, 3, 4, 5, 6, 6], 6), 7),
]
@returns_stdout
def solution(self, things, to_find):
for i in range(len(things)):
if to_find == things[i]:
print(i)
break
tests = [
((['on', 'the', 'way', 'to', 'the', 'store'], 'the'), 1),
(([0, 1, 2, 3, 4, 5, 6, 6], 6), 6),
]
@classmethod
def generate_inputs(cls):
things = generate_list(str)
to_find = generate_string()
things += [to_find] * random.randint(1, 3)
random.shuffle(things)
return dict(
things=things,
to_find=to_find,
)
class zip_exercise(ExerciseStep):
"""
Nice!
By the way, indexing and `len()` also work on strings. Try them out in the shell.
Here's another exercise. Given two strings of equal length, e.g:
string1 = "Hello"
string2 = "World"
print them vertically side by side, with a space between each character:
H W
e o
l r
l l
o d
"""
hints = """
Did you experiment with indexing and `len()` with strings in the shell?
Forget loops for a moment. How would you print just the first line, which has the first character of each of the two strings?
In the second line you want to print the second character of each string, and so on.
You will need a `for` loop.
You will need indexing (subscripting).
You will need `range`.
You will need `len`.
You will need `+`.
You will need to index both strings.
You will need to pass the same index to both strings each time to retrieve matching characters.
"""
@returns_stdout
def solution(self, string1, string2):
for i in range(len(string1)):
char1 = string1[i]
char2 = string2[i]
print(char1 + ' ' + char2)
tests = {
("Hello", "World"): dedent("""\
H W
e o
l r
l l
o d
"""),
("Having", "ablast"): dedent("""\
H a
a b
v l
i a
n s
g t
"""),
}
@classmethod
def generate_inputs(cls):
length = random.randrange(5, 11)
return dict(
string1=generate_string(length),
string2=generate_string(length),
)
class zip_longest_exercise(ExerciseStep):
"""
Incredible!
Your solution probably looks something like this:
for i in range(len(string1)):
char1 = string1[i]
char2 = string2[i]
print(char1 + ' ' + char2)
This doesn't work so well if the strings have different lengths.
In fact, it goes wrong in different ways depending on whether `string1` or `string2` is longer.
Your next challenge is to fix this problem by filling in 'missing' characters with spaces.
For example, for:
string1 = "Goodbye"
string2 = "World"
output:
G W
o o
o r
d l
b d
y
e
and for:
string1 = "Hello"
string2 = "Elizabeth"
output:
H E
e l
l i
l z
o a
b
e
t
h
"""
hints = [
"The solution has the same overall structure and "
"essential elements of the previous solution, "
"but it's significantly longer and will require "
"a few additional ideas and pieces.",
dedent("""
In particular, it should still contain something like:
for i in range(...):
...
print(char1 + ' ' + char2)
"""),
"What should go inside `range()`? Neither `len(string1)` nor `len(string2)` is good enough.",
"You want a loop iteration for every character in the longer string.",
"That means you need `range(<length of the longest string>)`",
"In other words you need to find the biggest of the two values "
"`len(string1)` and `len(string2)`. You've already done an exercise like that.",
"Once you've sorted out `for i in range(...)`, `i` will sometimes be too big "
"to be a valid index for both strings. You will need to check if it's too big before indexing.",
"Remember, the biggest valid index for `string1` is `len(string1) - 1`. "
"`len(string)` is too big.",
"You will need two `if` statements, one for each string.",
"You will need to set e.g. `char1 = ' '` when `string1[i]` is not valid.",
]
# TODO catch user writing string1 < string2
@returns_stdout
def solution(self, string1, string2):
length1 = len(string1)
length2 = len(string2)
if length1 > length2:
length = length1
else:
length = length2
for i in range(length):
if i < len(string1):
char1 = string1[i]
else:
char1 = ' '
if i < len(string2):
char2 = string2[i]
else:
char2 = ' '
print(char1 + ' ' + char2)
tests = {
("Goodbye", "World"): dedent("""\
G W
o o
o r
d l
b d
y
e
"""),
("Hello", "Elizabeth"): dedent("""\
H E
e l
l i
l z
o a
b
e
t
h
"""),
}
@classmethod
def generate_inputs(cls):
length1 = random.randrange(5, 11)
length2 = random.randrange(12, 20)
if random.choice([True, False]):
length1, length2 = length2, length1
return dict(
string1=generate_string(length1),
string2=generate_string(length2),
)
final_text = """
Magnificent! Take a break, you've earned it!
"""
class CallingFunctionsTerminology(Page):
title = "Terminology: Calling functions and methods"
class print_functions(VerbatimStep):
"""
It's time to expand your vocabulary some more.
`print` and `len` are ***functions***. See for yourself:
__program_indented__
"""
def program(self):
print(len)
print(print)
class introducing_callable(VerbatimStep):
"""
An expression like `len(things)` or `print(things)` is a function ***call*** - when you write that, you are ***calling*** the function `len` or `print`. The fact that this is possible means that functions are ***callable***:
__program_indented__
"""
def program(self):
print(callable(len))
class not_callable(VerbatimStep):
"""
Most things are not callable, so trying to call them will give you an error:
__program_indented__
"""
# noinspection PyCallingNonCallable
def program(self):
f = 'a string'
print(callable(f))
f()
class print_returns_none(VerbatimStep):
"""
In the call `len(things)`, `things` is an ***argument***. Sometimes you will also see the word ***parameter***, which means basically the same thing as argument. It's a bit like you're giving the argument to the function - specifically we say that the argument `things` is *passed* to `len`, and `len` *accepts* or *receives* the argument.
`len(things)` will evaluate to a number such as 3, in which case we say that `len` ***returned*** 3.
All calls have to return something...even if it's nothing. For example, `print`'s job is to display something on screen, not to return a useful value. So it returns something useless instead:
__program_indented__
"""
# noinspection PyNoneFunctionAssignment
def program(self):
things = [1, 2, 3]
length = len(things)
printed = print(length)
print(printed)
class len_of_none(VerbatimStep):
"""
`None` is a special 'null' value which can't do anything interesting. It's a common placeholder that represents the lack of a real useful value. Functions that don't want to return anything return `None` by default. If you see an error message about `None` or `NoneType`, it often means you assigned the wrong thing to a variable:
__program_indented__
"""
# noinspection PyNoneFunctionAssignment,PyUnusedLocal,PyTypeChecker
def program(self):
things = print([1, 2, 3])
length = len(things)
class methods_of_str(VerbatimStep):
"""
A ***method*** is a function which belongs to a type, and can be called on all values of that type using `.`. For example, `upper` and `lower` are methods of strings, which are called with e.g. `word.upper()`:
__program_indented__
"""
def program(self):
word = 'Hello'
print(word.upper)
print(word.upper())
class no_append_for_str(VerbatimStep):
"""
Another example is that `append` is a method of lists. But you can't use `.upper` on a list or `.append` on a string:
__program_indented__
"""
# noinspection PyUnresolvedReferences
def program(self):
word = 'Hello'
word.append('!')
final_text = """
The word 'attribute' in the error message refers to the use of `.` - the error actually comes just from `word.append`, without even a call.
"""
class FunctionsAndMethodsForLists(Page):
# TODO this is quite the information dump and I'd like it to be a little more interactive,
# but users don't need to know these functions off by heart.
class sum_list(Step):
"""
Let's review how to work with lists. Suppose we have a list `nums = [1, 2, 3]`. You can use:
- **`append`**: Add an element to the end of the list. `nums.append(4)` changes the list to `[1, 2, 3, 4]`.
- **`len`**: Returns the number of elements. `len(nums)` is `3`.
- **`range`**: `range(n)` is an object similar to the list of numbers from 0 to `n - 1`. In particular, `range(len(nums))` is like `[0, 1, 2]`.
- **`subscripting`**: Get a value at an index. `nums[0]` is 1, `nums[1]` is 2, `nums[2]` is 3.
- **`+`**: Concatenates lists. `nums + [4, 5]` is `[1, 2, 3, 4, 5]`.
Here's some new things. Try them out in the shell.
- **`subscript assignment`**: Set a value at an index. `nums[0] = 9` changes the list to `[9, 2, 3]`.
- **`join`**: Add a list of strings with a separator in between. This is a method of strings (the separator) which takes an iterable of strings as an argument. `'--'.join(['apples', 'oranges', 'bananas'])` returns `'apples--oranges--bananas'`. You can also use an empty string if you don't want a separator, e.g. `''.join(['apples', 'oranges', 'bananas'])` returns `'applesorangesbananas'`.
- **`sum`**: Add a list of numbers. `sum(nums)` is 6.
- **`in`**: A comparison operator that checks if a value is in a list. `2 in nums` is `True`, but `4 in nums` is `False`.
- **`index`**: Returns the first index of a value in a list. `[7, 8, 9, 8].index(8)` is 1. Raises an error if the value isn't there.
You may recognise some of these from your exercises. I assure you that those exercises were not pointless, as you've now learned valuable fundamental skills. For example, you can use `in` to check if a list contains 5, but there's no similarly easy way to check for a number bigger than 5.
It's useful to know these functions, but it's not easy to learn them all, and there are many more. A more important skill is being able to look things up. For example, here are some typical ways you might Google the above functions if you forgot their names:
- `append`
- python add element to list
- python add item at end of list
- `len`
- python size of list
- python number of elements in list
- python how many characters in string
- `join`
- python combine list of strings with separator
- python add together list of strings with string in between
- `sum`
- python add list of numbers
- python total of numbers
- `in`
- python check if list contains value
- python test if list has element
- `index`
- python get position of element
- python get index of value
Let's practice this skill now. Find a function/method that returns the value in a list which is bigger than any other value. For example, given the list `[21, 55, 4, 91, 62, 49]`, it will return `91`. You should write the answer in the shell as a single small expression. For example, if you were looking for the function `sum`, you could write `sum([21, 55, 4, 91, 62, 49])`. Don't solve this manually with a loop.
"""
hints = """
Use the words 'python' and 'list' in your search query.
In one word, what's special about `91` in the list `[21, 55, 4, 91, 62, 49]`?
'biggest' or 'largest'
'python biggest value in list'
"""
program = "max([21, 55, 4, 91, 62, 49])"
def check(self):
return search_ast(
self.stmt,
ast.Call(func=ast.Name(id='max')),
)
class list_insert(Step):
"""
Good find! Let's do one more. If you have a list:
nums = [1, 2, 3, 4, 5]
You could write `nums.append(9)` and `nums` would change to:
[1, 2, 3, 4, 5, 9]
But suppose you don't want the 9 to be at the end, you want it to go between the second and third elements:
[1, 2, 9, 3, 4, 5]
Call the right function/method in the shell to do that.
"""
hints = """
Use the words 'python' and 'list' in your search query.
Instead of putting the value at the beginning or end, we want to put it ____________?
'in the middle' or 'at an index' or 'at a particular position'
'python add value at index'
"""
program = "nums.insert(2, 9)"
def check(self):
return search_ast(
self.stmt,
ast.Call(func=ast.Attribute(attr='insert'),
args=[ast.Constant(value=2),
ast.Constant(value=9)]),
)
class dir_list(VerbatimStep):
"""
Perfect!
It can also be useful to Google things like "python list tutorial", e.g. if:
- Googling a specific method has failed so you want to find it manually.
- You're still confused about lists after this course.
- It's been a while since you learned about lists and you need a reminder.
- You're struggling to solve a problem with lists and you need to go back to basics and strengthen your foundations.
There are also ways to find information without any googling. Try `__program__` in the shell.
"""
program = "dir([])"
final_text = """
`dir()` returns a list of the argument's attributes, which are mostly methods. Many will start with `__` which you can ignore for now - scroll to the end of the list and you'll see some familiar methods.
Here are a few more useful functions/methods. Suppose `nums = [28, 99, 10, 81, 59, 64]`
- **`sorted`**: Takes an iterable and returns a list of the elements in order. `sorted(nums)` returns `[10, 28, 59, 64, 81, 99]`.
- **`pop`**: Removes and returns an element at a given index. `nums.pop(3)` removes `nums[3]` (`81`) from the list and returns it. Without an argument, i.e. just `nums.pop()`, it will remove and return the last element.
- **`remove`**: Removes the first occurrence of the given element. `nums.remove(10)` will leave `nums` as `[28, 99, 81, 59, 64]`. Raises an error if the value doesn't exist. Equivalent to `nums.pop(nums.index(10))`.
- **`count`**: Returns the number of times the argument appears in the list. `[1, 2, 3, 2, 7, 2, 5].count(2)` is 3.
You've already seen that `len` and subscripting work with strings, a bit as if strings are lists of characters. Strings also support some of the new methods we've learned, not just for characters but for any substring. For example:
- `'the' in 'feed the dog and the cat'` is `True`
- `'feed the dog and the cat'.count('the')` is 2
- `'feed the dog and the cat'.index('the')` is 5
Note that in most cases, methods which modify a list in place (`append`, `insert`, `remove`) merely return `None`, while the remaining functions/methods return a new useful value without changing the original argument. The only exception is the `pop` method.
Modifying a value directly is called *mutation* - types of values which can be mutated are *mutable*, while those that can't are *immutable*. Strings are immutable - they don't have any methods like `append` or even subscript assignment. You simply can't change a string - you can only create new strings and use those instead. That means that this is a useless statement on its own:
word.upper()
The string referred to by `word` isn't modified, instead `word.upper()` returned a new string which was immediately discarded. If you want to change the value that `word` refers to, you have to assign a new value to the variable:
word = word.upper()
Or you can use `word.upper()` immediately in a larger expression, e.g.
if word.lower() == 'yes':
"""
class UnderstandingProgramsWithPythonTutor(Page):
final_text = """
It's time to learn about another tool to explore programs. Put some code in the editor and then click the new "Python Tutor" button. Here's some example code if you want:
all_numbers = [2, 4, 8, 1, 9, 7]
small_numbers = []
big_numbers = []
for number in all_numbers:
if number <= 5:
small_numbers.append(number)
else:
big_numbers.append(number)
print(small_numbers)
print(big_numbers)
The button will open a new tab with a visualisation from [pythontutor.com](http://pythontutor.com).
There you can navigate through the program step by step with the "Prev" or "Next" buttons, or drag
the slider left or right. You can also see the values of variables on the right.
"""
class EqualsVsIs(Page):
title = "`==` vs `is`"
class two_separate_lists(VerbatimStep):
"""
It's time to learn some technical details that are often misunderstood and lead to errors.
Run this program:
__program_indented__
"""
def program(self):
list1 = [1, 2, 3]
list2 = [1, 2, 3]
print(list1)
print(list2)
print(list1 == list2)
print(list1 is list2)
list1.append(4)
print(list1)
print(list2)
class same_list(VerbatimStep):
"""
This program is quite straightforward and mostly consists of things you're familiar with.
We create two variables which refer to lists.
The lists have the same elements, so they are equal: `list1 == list2` is `True`.
But then there's a new comparison operator: `is`. Here `list1 is list2` is `False`.
That means that regardless of the two lists being equal,
they are still two separate, distinct, individual lists.
As a result, when you append 4 to `list1`, only `list1` changes.
Now change `list2 = [1, 2, 3]` to `list2 = list1` and see what difference it makes.
"""
program_in_text = False
def program(self):
list1 = [1, 2, 3]
list2 = list1
print(list1)
print(list2)
print(list1 == list2)
print(list1 is list2)
list1.append(4)
print(list1)
print(list2)
final_text = """
Now `list1 is list2` is `True`, because *there is only one list*, and the two variables
`list1` and `list2` both refer to that same list. `list1.append(4)` appends to the one list
and the result can be seen in both `print(list1)` and `print(list2)` because both lines
are now just different ways of printing the same list.
I recommend running both versions with Python Tutor to see how it visualises the difference.
In the second case, the two variables both have arrows pointing to a single list object.
`list2 = list1` doesn't create an eternal link between the variables. If you assign a new value
to *either* of the variables, e.g. `list1 = [7, 8, 9]`, the other variable will be unaffected
and will still point to the original list.
Basically, an assignment like:
list2 = <expression>
means 'make the variable `list2` refer to whatever `<expression>` evaluates to'.
It doesn't make a copy of that value, which is how both variables can end up pointing to the same list.
But as we've learned before, `list2` doesn't remember `<expression>`, only the value.
It doesn't know about other variables.
You can copy a list with the `copy` method:
list2 = list1.copy()
This will make the program behave like the first version again.
If you come across this kind of problem and you're still having trouble understanding this stuff, read the essay [Facts and myths about Python names and values](https://nedbatchelder.com/text/names.html).
"""
class ModifyingWhileIterating(Page):
final_text = """
Consider this program. It loops through a list of numbers and removes the ones that are 10 or smaller. Or at least, it tries to. I recommend running it with Python Tutor.
numbers = [10, 7, 8, 3, 12, 15]
for i in range(len(numbers)):
number = numbers[i]
if number <= 10:
numbers.pop(i)
print(numbers)
(remember that `numbers.pop(i)` removes the element from `numbers` at index `i`)
As it runs, it clearly skips even looking at 7 or 3 and doesn't remove them, and at the end it fails when it tries to access an index that's too high. Can you see why this happens?
The index variable `i` runs through the usual values 0, 1, 2, ... as it's supposed to, but as the list changes those are no longer the positions we want. For example in the first iteration `i` is 0 and `number` is 10, which gets removed. This shifts the rest of the numbers left one position, so now 7 is in position 0. But then in the next iteration `i` is 1, and `numbers[i]` is 8. 7 got skipped.
We could try writing the program to use `remove` instead of `pop` so we don't have to use indices. It even looks nicer this way.
numbers = [10, 7, 8, 3, 12, 15]
for number in numbers:
if number <= 10:
numbers.remove(number)
print(numbers)
But it turns out this does the same thing, for the same reason. Iterating over a list still goes through the indices under the hood.
The lesson here is to ***never modify something while you iterate over it***. Keep mutation and looping separate.
The good news is that there are many ways to solve this. You can instead just loop over a copy, as in:
for number in numbers.copy():
Now the list being modified and the list being iterated over are separate objects, even if they start out with equal contents.
Similarly, you could loop over the original and modify a copy:
numbers = [10, 7, 8, 3, 12, 15]
big_numbers = numbers.copy()
for number in numbers:
if number <= 10:
big_numbers.remove(number)
print(big_numbers)
Or you could build up a new list from scratch. In this case, we've already done a similar thing in an exercise:
numbers = [10, 7, 8, 3, 12, 15]
big_numbers = []
for number in numbers:
if number > 10:
big_numbers.append(number)
print(big_numbers)
"""
| 34.038534
| 415
| 0.608333
| 5,225
| 36,217
| 4.174354
| 0.147177
| 0.009399
| 0.002613
| 0.011462
| 0.17074
| 0.130301
| 0.10834
| 0.088121
| 0.072716
| 0.057631
| 0
| 0.022418
| 0.294254
| 36,217
| 1,063
| 416
| 34.070555
| 0.830908
| 0.342408
| 0
| 0.356061
| 1
| 0.083333
| 0.554466
| 0.00836
| 0
| 0
| 0
| 0.002822
| 0
| 1
| 0.058712
| false
| 0.001894
| 0.013258
| 0.005682
| 0.176136
| 0.104167
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0a9abefb5c7f43f4b3586ebf44ef35bd05d5118a
| 1,223
|
py
|
Python
|
redisSeed.py
|
bigmacd/miscPython
|
ec473c724be54241e369a1bdb0f739d2b0ed02ee
|
[
"BSD-3-Clause"
] | null | null | null |
redisSeed.py
|
bigmacd/miscPython
|
ec473c724be54241e369a1bdb0f739d2b0ed02ee
|
[
"BSD-3-Clause"
] | null | null | null |
redisSeed.py
|
bigmacd/miscPython
|
ec473c724be54241e369a1bdb0f739d2b0ed02ee
|
[
"BSD-3-Clause"
] | null | null | null |
import time
import redis
import json
import argparse
""" Follows the StackExchange best practice for creating a work queue.
Basically push a task and publish a message that a task is there."""
def PushTask(client, queue, task, topic):
client.lpush(queue, task)
client.publish(topic, queue)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-q", "--queue", help="The queue from which workers will grab tasks")
parser.add_argument("-t", "--task", help="The task data")
parser.add_argument("-o", "--topic", help="The topic to which workers are subscribed")
parser.add_argument("-s", "--server", help="redis server host or IP")
parser.add_argument("-p",
"--port",
help="redis server port (default is 6379)",
type=int,
default=6379)
args = parser.parse_args()
if (args.queue is None
or args.task is None
or args.topic is None
or args.server is None):
parser.print_help()
else:
client = redis.StrictRedis(host=args.server, port=args.port)
PushTask(client, args.queue, args.task, args.topic)
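For context, here is a minimal sketch of the matching worker side of the pattern the docstring describes. This is an assumption, not part of the original file: it uses the redis-py API, and the queue/topic names are hypothetical placeholders that must match whatever was passed to redisSeed.py.
import redis

def work(server, port, queue, topic):
    client = redis.StrictRedis(host=server, port=port)
    pubsub = client.pubsub()
    pubsub.subscribe(topic)
    # Each published message signals that the named queue has a task waiting.
    for message in pubsub.listen():
        if message["type"] != "message":
            continue
        task = client.rpop(queue)  # lpush + rpop yields FIFO ordering
        if task is not None:
            print("processing task:", task)

work("localhost", 6379, "myqueue", "mytopic")
The publish step only wakes workers; the task payload itself travels through the list, so nothing is lost if no worker happens to be listening when the task is pushed.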
| 34.942857
| 95
| 0.614881
| 158
| 1,223
| 4.664557
| 0.424051
| 0.061058
| 0.115332
| 0.048847
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008959
| 0.269828
| 1,223
| 34
| 96
| 35.970588
| 0.816349
| 0
| 0
| 0
| 0
| 0
| 0.192771
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.148148
| null | null | 0.037037
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0a9deb518dd12c6a3961ce613b76fcc3db2acd68
| 602
|
py
|
Python
|
algorithm_training/abc87.py
|
hirotosuzuki/algorithm_training
|
3134bad4ea2ea57a77e05be6f21ba776a558f520
|
[
"MIT"
] | null | null | null |
algorithm_training/abc87.py
|
hirotosuzuki/algorithm_training
|
3134bad4ea2ea57a77e05be6f21ba776a558f520
|
[
"MIT"
] | null | null | null |
algorithm_training/abc87.py
|
hirotosuzuki/algorithm_training
|
3134bad4ea2ea57a77e05be6f21ba776a558f520
|
[
"MIT"
] | null | null | null |
class TaskA:
def run(self):
V, A, B, C = map(int, input().split())
pass
class TaskB:
def run(self):
A = int(input())
B = int(input())
C = int(input())
X = int(input())
counter = 0
for a in range(A+1):
for b in range(B+1):
for c in range(C+1):
total = 500 * a + 100 * b + 50 * c
if total == X:
counter += 1
print(counter)
class TaskC:
def run(self):
pass
if __name__ == "__main__":
task = TaskB()
task.run()
| 21.5
| 54
| 0.413621
| 78
| 602
| 3.089744
| 0.410256
| 0.165975
| 0.124481
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.039394
| 0.451827
| 602
| 28
| 55
| 21.5
| 0.690909
| 0
| 0
| 0.208333
| 0
| 0
| 0.013267
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0.083333
| 0
| 0
| 0.25
| 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
0a9e0852ce066b6a61ac5cfb9625f8879b66f594
| 536
|
py
|
Python
|
serveur/serveurDroit.py
|
PL4typus/SysNetProject17
|
283c127a3363876360bc52b54eae939c6104c6b4
|
[
"MIT"
] | null | null | null |
serveur/serveurDroit.py
|
PL4typus/SysNetProject17
|
283c127a3363876360bc52b54eae939c6104c6b4
|
[
"MIT"
] | null | null | null |
serveur/serveurDroit.py
|
PL4typus/SysNetProject17
|
283c127a3363876360bc52b54eae939c6104c6b4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import socket,sys,os
TCP_IP = '127.0.0.1'
TCP_PORT = 6262
BUFFER_SIZE = 1024
s= socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.bind((TCP_IP,TCP_PORT))
s.listen(5)
conn, addr = s.accept()
print('Incoming connection:', addr)
data = conn.recv(BUFFER_SIZE)
if data == "m" :
os.popen("chmod +w $PWD")
else :
os.popen("chmod -w $PWD")
while 1 :
data = conn.recv(BUFFER_SIZE)
print(data)
if data == "1":
break
rep = os.popen(data+" 2>&1")
conn.send("response: \n"+rep.read())
conn.close()
| 14.486486
| 51
| 0.641791
| 91
| 536
| 3.681319
| 0.538462
| 0.089552
| 0.071642
| 0.107463
| 0.226866
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042889
| 0.173507
| 536
| 36
| 52
| 14.888889
| 0.713318
| 0.029851
| 0
| 0.090909
| 0
| 0
| 0.144509
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.045455
| null | null | 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0a9ea2c54f2546f23ad0eceb20e12ee8b19cd36b
| 888
|
py
|
Python
|
BE/common/helpers.py
|
kosior/ngLearn-1
|
4cc52153876aca409d56bd9cabace9283946bd32
|
[
"MIT"
] | 1
|
2018-05-06T00:31:35.000Z
|
2018-05-06T00:31:35.000Z
|
BE/common/helpers.py
|
kosior/ngLearn-1
|
4cc52153876aca409d56bd9cabace9283946bd32
|
[
"MIT"
] | null | null | null |
BE/common/helpers.py
|
kosior/ngLearn-1
|
4cc52153876aca409d56bd9cabace9283946bd32
|
[
"MIT"
] | null | null | null |
from rest_framework_jwt.utils import jwt_decode_handler
from users.models import User
from users.serializers import UserSerializer
def jwt_response_payload_handler(token, user=None, request=None):
return {
'token': token,
'user': UserSerializer(user, context={'request': request}).data
}
def get_token_from_request(request):
auth = request.META.get('HTTP_AUTHORIZATION', '').split()
if len(auth) == 2:
return auth[1]
return None
def get_user_from_token(token):
data = jwt_decode_handler(token)
user_id = data.get('user_id')
if user_id:
try:
return User.objects.get(id=user_id)
except User.DoesNotExist:
return None
return None
def get_user_from_request(request):
token = get_token_from_request(request)
if token:
return get_user_from_token(token)
return None
| 24
| 71
| 0.684685
| 118
| 888
| 4.90678
| 0.313559
| 0.055268
| 0.093264
| 0.06563
| 0.226252
| 0.082902
| 0
| 0
| 0
| 0
| 0
| 0.002899
| 0.222973
| 888
| 36
| 72
| 24.666667
| 0.836232
| 0
| 0
| 0.148148
| 0
| 0
| 0.046171
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.148148
| false
| 0
| 0.111111
| 0.037037
| 0.555556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|
0aa3e139fa08c65698af3c065bdbf7e9c6759f7b
| 1,946
|
py
|
Python
|
NewsPaperD7(final)/NewsPaper/News/migrations/0001_initial.py
|
GregTMJ/django-files
|
dfd2c8da596522b77fb3dfc8089f0d287a94d53b
|
[
"MIT"
] | 1
|
2021-05-29T21:17:56.000Z
|
2021-05-29T21:17:56.000Z
|
NewsPaperD6/NewsPaper/News/migrations/0001_initial.py
|
GregTMJ/django-files
|
dfd2c8da596522b77fb3dfc8089f0d287a94d53b
|
[
"MIT"
] | null | null | null |
NewsPaperD6/NewsPaper/News/migrations/0001_initial.py
|
GregTMJ/django-files
|
dfd2c8da596522b77fb3dfc8089f0d287a94d53b
|
[
"MIT"
] | 1
|
2021-06-30T12:43:39.000Z
|
2021-06-30T12:43:39.000Z
|
# Generated by Django 3.2 on 2021-04-15 18:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category', models.CharField(default='select category', max_length=255, unique=True)),
('subscriber', models.ManyToManyField(related_name='subscriber', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choosing', models.BooleanField(default=False)),
('time_in', models.DateTimeField(auto_now_add=True)),
('title', models.CharField(max_length=255, unique=True)),
('text', models.TextField(max_length=255)),
('rating', models.FloatField(default=0.0)),
('author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='News.author', verbose_name='User')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='News.category')),
],
),
]
| 42.304348
| 153
| 0.611511
| 202
| 1,946
| 5.757426
| 0.376238
| 0.034394
| 0.048151
| 0.075666
| 0.437661
| 0.399828
| 0.399828
| 0.399828
| 0.285469
| 0.211522
| 0
| 0.017194
| 0.252826
| 1,946
| 45
| 154
| 43.244444
| 0.782669
| 0.022097
| 0
| 0.394737
| 1
| 0
| 0.079432
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.078947
| 0
| 0.184211
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0aa43893204c6ba098361aa19c39257195d9d726
| 425
|
py
|
Python
|
blitz_api/migrations/0020_auto_20190529_1200.py
|
MelanieFJNR/Blitz-API
|
9a6daecd158fe07a6aeb80cbf586781eb688f0f9
|
[
"MIT"
] | 3
|
2019-10-22T00:16:49.000Z
|
2021-07-15T07:44:43.000Z
|
blitz_api/migrations/0020_auto_20190529_1200.py
|
MelanieFJNR/Blitz-API
|
9a6daecd158fe07a6aeb80cbf586781eb688f0f9
|
[
"MIT"
] | 1,183
|
2018-04-19T18:40:30.000Z
|
2022-03-31T21:05:05.000Z
|
blitz_api/migrations/0020_auto_20190529_1200.py
|
MelanieFJNR/Blitz-API
|
9a6daecd158fe07a6aeb80cbf586781eb688f0f9
|
[
"MIT"
] | 12
|
2018-04-17T19:16:42.000Z
|
2022-01-27T00:19:59.000Z
|
# Generated by Django 2.0.8 on 2019-05-29 16:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blitz_api', '0019_merge_20190524_1719'),
]
operations = [
migrations.AlterField(
model_name='exportmedia',
name='file',
field=models.FileField(upload_to='export/%Y/%m/', verbose_name='file'),
),
]
| 22.368421
| 83
| 0.607059
| 48
| 425
| 5.229167
| 0.833333
| 0.063745
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.099042
| 0.263529
| 425
| 18
| 84
| 23.611111
| 0.702875
| 0.105882
| 0
| 0
| 1
| 0
| 0.171958
| 0.063492
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0aaa92e8b56443a2b167621484f9881042d7391b
| 983
|
py
|
Python
|
ProgramFlow/functions/banner.py
|
kumarvgit/python3
|
318c5e7503fafc9c60082fa123e2930bd82a4ec9
|
[
"MIT"
] | null | null | null |
ProgramFlow/functions/banner.py
|
kumarvgit/python3
|
318c5e7503fafc9c60082fa123e2930bd82a4ec9
|
[
"MIT"
] | null | null | null |
ProgramFlow/functions/banner.py
|
kumarvgit/python3
|
318c5e7503fafc9c60082fa123e2930bd82a4ec9
|
[
"MIT"
] | null | null | null |
def banner_text(text):
screen_width = 80
if len(text) > screen_width - 4:
print("EEK!!")
print("THE TEXT IS TOO LONG TO FIT IN THE SPECIFIED WIDTH")
if text == "*":
print("*" * screen_width)
else:
centred_text = text.center(screen_width - 4)
output_string = "**{0}**".format(centred_text)
print(output_string)
banner_text("*")
banner_text("Always look on the bright side of life...")
banner_text("If life seems jolly rotten,")
banner_text("There's something you've forgotten!")
banner_text("And that's to laugh and smile and dance and sing,")
banner_text(" ")
banner_text("When you're feeling in the dumps,")
banner_text("Don't be silly chumps,")
banner_text("Just purse your lips and whistle - that's the thing!")
banner_text("And... always look on the bright side of life...")
banner_text("*")
result = banner_text("Nothing is returned")
print(result)
numbers = [4, 2, 7, 5, 8, 3, 9, 6, 1]
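# list.sort() sorts the list in place and returns None, so this prints None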
print(numbers.sort())
| 30.71875
| 67
| 0.66531
| 153
| 983
| 4.137255
| 0.496732
| 0.205371
| 0.047393
| 0.063191
| 0.129542
| 0.129542
| 0.129542
| 0.129542
| 0.129542
| 0.129542
| 0
| 0.017588
| 0.190234
| 983
| 31
| 68
| 31.709677
| 0.777638
| 0
| 0
| 0.076923
| 0
| 0
| 0.399797
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0
| 0
| 0.038462
| 0.230769
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0aab9dbbc4006ac10614eb6e13f1101929dde5bc
| 5,242
|
py
|
Python
|
objectstoreSiteMover.py
|
nikmagini/pilot
|
1c84fcf6f7e43b669d2357326cdbe06382ac829f
|
[
"Apache-2.0"
] | 13
|
2015-02-19T17:17:10.000Z
|
2021-12-22T06:48:02.000Z
|
objectstoreSiteMover.py
|
nikmagini/pilot
|
1c84fcf6f7e43b669d2357326cdbe06382ac829f
|
[
"Apache-2.0"
] | 85
|
2015-01-06T15:01:51.000Z
|
2018-11-29T09:03:35.000Z
|
objectstoreSiteMover.py
|
nikmagini/pilot
|
1c84fcf6f7e43b669d2357326cdbe06382ac829f
|
[
"Apache-2.0"
] | 22
|
2015-06-09T12:08:29.000Z
|
2018-11-20T10:07:01.000Z
|
#!/usr/bin/env python
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Wen Guan, <[email protected]>, 2014
# objectstoreSiteMover.py
import os
from configSiteMover import config_sm
import SiteMover
from xrootdObjectstoreSiteMover import xrootdObjectstoreSiteMover
from S3ObjectstoreSiteMover import S3ObjectstoreSiteMover
class objectstoreSiteMover(SiteMover.SiteMover):
    """
    ObjectstoreSiteMover
    It uses the url to decide which ObjectstoreSiteMover implementation to be used.
    """
    copyCommand = "objectstore"
    checksum_command = "adler32"

    def __init__(self, setup_path='', useTimerCommand=True, *args, **kwrds):
        self._setup = setup_path
        self._useTimerCommand = useTimerCommand

    def get_data(self, gpfn, lfn, path, fsize=0, fchecksum=0, guid=0, **pdict):
        gpfn = gpfn.replace("s3+rucio", "s3")
        if gpfn.startswith("root:"):
            sitemover = xrootdObjectstoreSiteMover(self.getSetup())
            return sitemover.get_data(gpfn, lfn, path, fsize, fchecksum, guid, **pdict)
        if gpfn.startswith("s3:"):
            sitemover = S3ObjectstoreSiteMover(self.getSetup(), self._useTimerCommand)
            return sitemover.get_data(gpfn, lfn, path, fsize, fchecksum, guid, **pdict)
        return -1, "No objectstore sitemover found for this scheme(%s)" % gpfn

    def put_data(self, source, destination, fsize=0, fchecksum=0, **pdict):
        # Get input parameters from pdict
        lfn = pdict.get('lfn', '')
        logPath = pdict.get('logPath', '')
        if logPath != "":
            surl = logPath
        else:
            surl = os.path.join(destination, lfn)
        surl = surl.replace("s3+rucio", "s3")
        if surl.startswith("root:"):
            sitemover = xrootdObjectstoreSiteMover(self.getSetup())
            return sitemover.put_data(source, destination, fsize, fchecksum, **pdict)
        if surl.startswith("s3:"):
            sitemover = S3ObjectstoreSiteMover(self.getSetup(), self._useTimerCommand)
            return sitemover.put_data(source, surl, fsize, fchecksum, **pdict)
        return -1, "No objectstore sitemover found for this scheme(%s)" % destination, destination, fsize, fchecksum, config_sm.ARCH_DEFAULT

if __name__ == '__main__':
    os.environ['PilotHomeDir'] = os.getcwd()

    from SiteInformation import SiteInformation
    s1 = SiteInformation()
    #s1.getObjectstoresField("os_access_key", "eventservice", queuename='BNL_EC2W2_MCORE')

    f = objectstoreSiteMover()

    gpfn = "nonsens_gpfn"
    lfn = "AOD.310713._000004.pool.root.1"
    path = os.getcwd()
    fsize = "4261010441"
    fchecksum = "9145af38"
    dsname = "data11_7TeV.00177986.physics_Egamma.merge.AOD.r2276_p516_p523_tid310713_00"
    report = {}

    #print f.getGlobalFilePaths(dsname)
    #print f.findGlobalFilePath(lfn, dsname)
    #print f.getLocalROOTSetup()
    #path = "root://atlas-objectstore.cern.ch//atlas/eventservice/2181626927" # + your .root filename"

    """
    source = "/bin/hostname"
    dest = "root://eosatlas.cern.ch//eos/atlas/unpledged/group-wisc/users/wguan/"
    lfn = "NTUP_PHOTON.01255150._000001.root.1"
    localSize = 17848
    localChecksum = "89b93830"
    print f.put_data(source, dest, fsize=localSize, fchecksum=localChecksum, prodSourceLabel='ptest', experiment='ATLAS', report =report, lfn=lfn, guid='aa8ee1ae-54a5-468b-a0a0-41cf17477ffc')

    gpfn = "root://eosatlas.cern.ch//eos/atlas/unpledged/group-wisc/users/wguan/NTUP_PHOTON.01255150._000001.root.1"
    lfn = "NTUP_PHOTON.01255150._000001.root.1"
    tmpDir = "/tmp/"
    localSize = 17848
    localChecksum = "89b93830"
    print f.get_data(gpfn, lfn, tmpDir, fsize=localSize, fchecksum=localChecksum, experiment='ATLAS', report =report, guid='aa8ee1ae-54a5-468b-a0a0-41cf17477ffc')
    """

    # test S3 object store
    source = "/bin/hostname"
    #dest = "s3://ceph003.usatlas.bnl.gov:8443//wguan_bucket/dir1/dir2/NTUP_PHOTON.01255150._000001.root.1"
    dest = "s3://s3-us-west-2.amazonaws.com:80//s3-atlasdatadisk-west2-racf/dir1/"
    lfn = "NTUP_PHOTON.01255150._000001.root.1"
    localSize = None
    localChecksum = None
    print f.put_data(source, dest, fsize=localSize, fchecksum=localChecksum, prodSourceLabel='ptest', experiment='ATLAS', report =report, lfn=lfn, guid='aa8ee1ae-54a5-468b-a0a0-41cf17477ffc', jobId=2730987843, jobsetID=2728044425, pandaProxySecretKey='')

    gpfn = "s3://ceph003.usatlas.bnl.gov:8443//wguan_bucket/dir1/dir2/NTUP_PHOTON.01255150._000001.root.1"
    gpfn = "s3://s3-us-west-2.amazonaws.com:80//s3-atlasdatadisk-west2-racf/dir1/NTUP_PHOTON.01255150._000001.root.1"
    lfn = "NTUP_PHOTON.01255150._000001.root.1"
    tmpDir = "/tmp/"
    localSize = None
    localChecksum = None
    print f.get_data(gpfn, lfn, tmpDir, fsize=localSize, fchecksum=localChecksum, experiment='ATLAS', report =report, guid='aa8ee1ae-54a5-468b-a0a0-41cf17477ffc', jobId=2730987843, jobsetID=2728044425, pandaProxySecretKey='deb05b9fb5034a45b80c03bd671359c9')
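
Stripped of the pilot-specific classes, the mover selection above reduces to a table keyed by URL scheme. A self-contained sketch under that reading (the mover functions here are hypothetical placeholders, not the pilot API):

from urllib.parse import urlparse

def xrootd_get(url):
    return 0, "copied %s via the xrootd mover" % url

def s3_get(url):
    return 0, "copied %s via the S3 mover" % url

# Scheme -> mover, mirroring the startswith("root:") / startswith("s3:") checks.
MOVERS = {"root": xrootd_get, "s3": s3_get}

def get_data(url):
    scheme = urlparse(url.replace("s3+rucio", "s3")).scheme
    mover = MOVERS.get(scheme)
    if mover is None:
        return -1, "No objectstore sitemover found for this scheme(%s)" % url
    return mover(url)

print(get_data("s3+rucio://bucket/key"))   # dispatches to s3_get
print(get_data("gsiftp://host/path"))      # (-1, "No objectstore sitemover ...")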
| 44.803419
| 256
| 0.702404
| 618
| 5,242
| 5.857605
| 0.333333
| 0.012431
| 0.039779
| 0.053039
| 0.530663
| 0.513536
| 0.487569
| 0.487569
| 0.427072
| 0.427072
| 0
| 0.093664
| 0.169019
| 5,242
| 116
| 257
| 45.189655
| 0.737374
| 0.15166
| 0
| 0.2
| 0
| 0.05
| 0.226713
| 0.155919
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.1
| null | null | 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0ab496b4beb92ca3fe52c60cfcbb81b2b17b5de3
| 22,976
|
py
|
Python
|
serde/src/gen/thrift/gen-py/megastruct/ttypes.py
|
amCharlie/hive
|
e1870c190188a3b706849059969c8bec2220b6d2
|
[
"Apache-2.0"
] | 2
|
2021-04-24T08:07:45.000Z
|
2021-04-24T08:07:46.000Z
|
serde/src/gen/thrift/gen-py/megastruct/ttypes.py
|
amCharlie/hive
|
e1870c190188a3b706849059969c8bec2220b6d2
|
[
"Apache-2.0"
] | 14
|
2020-12-26T22:01:38.000Z
|
2022-02-09T22:41:46.000Z
|
serde/src/gen/thrift/gen-py/megastruct/ttypes.py
|
amCharlie/hive
|
e1870c190188a3b706849059969c8bec2220b6d2
|
[
"Apache-2.0"
] | 7
|
2021-08-16T07:49:24.000Z
|
2022-03-17T09:04:34.000Z
|
#
# Autogenerated by Thrift Compiler (0.13.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
#  options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from thrift.transport import TTransport

all_structs = []


class MyEnum(object):
    LLAMA = 1
    ALPACA = 2

    _VALUES_TO_NAMES = {
        1: "LLAMA",
        2: "ALPACA",
    }

    _NAMES_TO_VALUES = {
        "LLAMA": 1,
        "ALPACA": 2,
    }


class MiniStruct(object):
    """
    Attributes:
     - my_string
     - my_enum
    """

    def __init__(self, my_string=None, my_enum=None,):
        self.my_string = my_string
        self.my_enum = my_enum

    def read(self, iprot):
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.my_string = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.I32:
                    self.my_enum = iprot.readI32()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('MiniStruct')
        if self.my_string is not None:
            oprot.writeFieldBegin('my_string', TType.STRING, 1)
            oprot.writeString(self.my_string.encode('utf-8') if sys.version_info[0] == 2 else self.my_string)
            oprot.writeFieldEnd()
        if self.my_enum is not None:
            oprot.writeFieldBegin('my_enum', TType.I32, 2)
            oprot.writeI32(self.my_enum)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


class MegaStruct(object):
    """
    Attributes:
     - my_bool
     - my_byte
     - my_16bit_int
     - my_32bit_int
     - my_64bit_int
     - my_double
     - my_string
     - my_binary
     - my_string_string_map
     - my_string_enum_map
     - my_enum_string_map
     - my_enum_struct_map
     - my_enum_stringlist_map
     - my_enum_structlist_map
     - my_stringlist
     - my_structlist
     - my_enumlist
     - my_stringset
     - my_enumset
     - my_structset
    """

    def __init__(self, my_bool=None, my_byte=None, my_16bit_int=None, my_32bit_int=None, my_64bit_int=None, my_double=None, my_string=None, my_binary=None, my_string_string_map=None, my_string_enum_map=None, my_enum_string_map=None, my_enum_struct_map=None, my_enum_stringlist_map=None, my_enum_structlist_map=None, my_stringlist=None, my_structlist=None, my_enumlist=None, my_stringset=None, my_enumset=None, my_structset=None,):
        self.my_bool = my_bool
        self.my_byte = my_byte
        self.my_16bit_int = my_16bit_int
        self.my_32bit_int = my_32bit_int
        self.my_64bit_int = my_64bit_int
        self.my_double = my_double
        self.my_string = my_string
        self.my_binary = my_binary
        self.my_string_string_map = my_string_string_map
        self.my_string_enum_map = my_string_enum_map
        self.my_enum_string_map = my_enum_string_map
        self.my_enum_struct_map = my_enum_struct_map
        self.my_enum_stringlist_map = my_enum_stringlist_map
        self.my_enum_structlist_map = my_enum_structlist_map
        self.my_stringlist = my_stringlist
        self.my_structlist = my_structlist
        self.my_enumlist = my_enumlist
        self.my_stringset = my_stringset
        self.my_enumset = my_enumset
        self.my_structset = my_structset

    def read(self, iprot):
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.BOOL:
                    self.my_bool = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.BYTE:
                    self.my_byte = iprot.readByte()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.I16:
                    self.my_16bit_int = iprot.readI16()
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.I32:
                    self.my_32bit_int = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 5:
                if ftype == TType.I64:
                    self.my_64bit_int = iprot.readI64()
                else:
                    iprot.skip(ftype)
            elif fid == 6:
                if ftype == TType.DOUBLE:
                    self.my_double = iprot.readDouble()
                else:
                    iprot.skip(ftype)
            elif fid == 7:
                if ftype == TType.STRING:
                    self.my_string = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 8:
                if ftype == TType.STRING:
                    self.my_binary = iprot.readBinary()
                else:
                    iprot.skip(ftype)
            elif fid == 9:
                if ftype == TType.MAP:
                    self.my_string_string_map = {}
                    (_ktype1, _vtype2, _size0) = iprot.readMapBegin()
                    for _i4 in range(_size0):
                        _key5 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                        _val6 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                        self.my_string_string_map[_key5] = _val6
                    iprot.readMapEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 10:
                if ftype == TType.MAP:
                    self.my_string_enum_map = {}
                    (_ktype8, _vtype9, _size7) = iprot.readMapBegin()
                    for _i11 in range(_size7):
                        _key12 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                        _val13 = iprot.readI32()
                        self.my_string_enum_map[_key12] = _val13
                    iprot.readMapEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 11:
                if ftype == TType.MAP:
                    self.my_enum_string_map = {}
                    (_ktype15, _vtype16, _size14) = iprot.readMapBegin()
                    for _i18 in range(_size14):
                        _key19 = iprot.readI32()
                        _val20 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                        self.my_enum_string_map[_key19] = _val20
                    iprot.readMapEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 12:
                if ftype == TType.MAP:
                    self.my_enum_struct_map = {}
                    (_ktype22, _vtype23, _size21) = iprot.readMapBegin()
                    for _i25 in range(_size21):
                        _key26 = iprot.readI32()
                        _val27 = MiniStruct()
                        _val27.read(iprot)
                        self.my_enum_struct_map[_key26] = _val27
                    iprot.readMapEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 13:
                if ftype == TType.MAP:
                    self.my_enum_stringlist_map = {}
                    (_ktype29, _vtype30, _size28) = iprot.readMapBegin()
                    for _i32 in range(_size28):
                        _key33 = iprot.readI32()
                        _val34 = []
                        (_etype38, _size35) = iprot.readListBegin()
                        for _i39 in range(_size35):
                            _elem40 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                            _val34.append(_elem40)
                        iprot.readListEnd()
                        self.my_enum_stringlist_map[_key33] = _val34
                    iprot.readMapEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 14:
                if ftype == TType.MAP:
                    self.my_enum_structlist_map = {}
                    (_ktype42, _vtype43, _size41) = iprot.readMapBegin()
                    for _i45 in range(_size41):
                        _key46 = iprot.readI32()
                        _val47 = []
                        (_etype51, _size48) = iprot.readListBegin()
                        for _i52 in range(_size48):
                            _elem53 = MiniStruct()
                            _elem53.read(iprot)
                            _val47.append(_elem53)
                        iprot.readListEnd()
                        self.my_enum_structlist_map[_key46] = _val47
                    iprot.readMapEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 15:
                if ftype == TType.LIST:
                    self.my_stringlist = []
                    (_etype57, _size54) = iprot.readListBegin()
                    for _i58 in range(_size54):
                        _elem59 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                        self.my_stringlist.append(_elem59)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 16:
                if ftype == TType.LIST:
                    self.my_structlist = []
                    (_etype63, _size60) = iprot.readListBegin()
                    for _i64 in range(_size60):
                        _elem65 = MiniStruct()
                        _elem65.read(iprot)
                        self.my_structlist.append(_elem65)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 17:
                if ftype == TType.LIST:
                    self.my_enumlist = []
                    (_etype69, _size66) = iprot.readListBegin()
                    for _i70 in range(_size66):
                        _elem71 = iprot.readI32()
                        self.my_enumlist.append(_elem71)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 18:
                if ftype == TType.SET:
                    self.my_stringset = set()
                    (_etype75, _size72) = iprot.readSetBegin()
                    for _i76 in range(_size72):
                        _elem77 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                        self.my_stringset.add(_elem77)
                    iprot.readSetEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 19:
                if ftype == TType.SET:
                    self.my_enumset = set()
                    (_etype81, _size78) = iprot.readSetBegin()
                    for _i82 in range(_size78):
                        _elem83 = iprot.readI32()
                        self.my_enumset.add(_elem83)
                    iprot.readSetEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 20:
                if ftype == TType.SET:
                    self.my_structset = set()
                    (_etype87, _size84) = iprot.readSetBegin()
                    for _i88 in range(_size84):
                        _elem89 = MiniStruct()
                        _elem89.read(iprot)
                        self.my_structset.add(_elem89)
                    iprot.readSetEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('MegaStruct')
        if self.my_bool is not None:
            oprot.writeFieldBegin('my_bool', TType.BOOL, 1)
            oprot.writeBool(self.my_bool)
            oprot.writeFieldEnd()
        if self.my_byte is not None:
            oprot.writeFieldBegin('my_byte', TType.BYTE, 2)
            oprot.writeByte(self.my_byte)
            oprot.writeFieldEnd()
        if self.my_16bit_int is not None:
            oprot.writeFieldBegin('my_16bit_int', TType.I16, 3)
            oprot.writeI16(self.my_16bit_int)
            oprot.writeFieldEnd()
        if self.my_32bit_int is not None:
            oprot.writeFieldBegin('my_32bit_int', TType.I32, 4)
            oprot.writeI32(self.my_32bit_int)
            oprot.writeFieldEnd()
        if self.my_64bit_int is not None:
            oprot.writeFieldBegin('my_64bit_int', TType.I64, 5)
            oprot.writeI64(self.my_64bit_int)
            oprot.writeFieldEnd()
        if self.my_double is not None:
            oprot.writeFieldBegin('my_double', TType.DOUBLE, 6)
            oprot.writeDouble(self.my_double)
            oprot.writeFieldEnd()
        if self.my_string is not None:
            oprot.writeFieldBegin('my_string', TType.STRING, 7)
            oprot.writeString(self.my_string.encode('utf-8') if sys.version_info[0] == 2 else self.my_string)
            oprot.writeFieldEnd()
        if self.my_binary is not None:
            oprot.writeFieldBegin('my_binary', TType.STRING, 8)
            oprot.writeBinary(self.my_binary)
            oprot.writeFieldEnd()
        if self.my_string_string_map is not None:
            oprot.writeFieldBegin('my_string_string_map', TType.MAP, 9)
            oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.my_string_string_map))
            for kiter90, viter91 in self.my_string_string_map.items():
                oprot.writeString(kiter90.encode('utf-8') if sys.version_info[0] == 2 else kiter90)
                oprot.writeString(viter91.encode('utf-8') if sys.version_info[0] == 2 else viter91)
            oprot.writeMapEnd()
            oprot.writeFieldEnd()
        if self.my_string_enum_map is not None:
            oprot.writeFieldBegin('my_string_enum_map', TType.MAP, 10)
            oprot.writeMapBegin(TType.STRING, TType.I32, len(self.my_string_enum_map))
            for kiter92, viter93 in self.my_string_enum_map.items():
                oprot.writeString(kiter92.encode('utf-8') if sys.version_info[0] == 2 else kiter92)
                oprot.writeI32(viter93)
            oprot.writeMapEnd()
            oprot.writeFieldEnd()
        if self.my_enum_string_map is not None:
            oprot.writeFieldBegin('my_enum_string_map', TType.MAP, 11)
            oprot.writeMapBegin(TType.I32, TType.STRING, len(self.my_enum_string_map))
            for kiter94, viter95 in self.my_enum_string_map.items():
                oprot.writeI32(kiter94)
                oprot.writeString(viter95.encode('utf-8') if sys.version_info[0] == 2 else viter95)
            oprot.writeMapEnd()
            oprot.writeFieldEnd()
        if self.my_enum_struct_map is not None:
            oprot.writeFieldBegin('my_enum_struct_map', TType.MAP, 12)
            oprot.writeMapBegin(TType.I32, TType.STRUCT, len(self.my_enum_struct_map))
            for kiter96, viter97 in self.my_enum_struct_map.items():
                oprot.writeI32(kiter96)
                viter97.write(oprot)
            oprot.writeMapEnd()
            oprot.writeFieldEnd()
        if self.my_enum_stringlist_map is not None:
            oprot.writeFieldBegin('my_enum_stringlist_map', TType.MAP, 13)
            oprot.writeMapBegin(TType.I32, TType.LIST, len(self.my_enum_stringlist_map))
            for kiter98, viter99 in self.my_enum_stringlist_map.items():
                oprot.writeI32(kiter98)
                oprot.writeListBegin(TType.STRING, len(viter99))
                for iter100 in viter99:
                    oprot.writeString(iter100.encode('utf-8') if sys.version_info[0] == 2 else iter100)
                oprot.writeListEnd()
            oprot.writeMapEnd()
            oprot.writeFieldEnd()
        if self.my_enum_structlist_map is not None:
            oprot.writeFieldBegin('my_enum_structlist_map', TType.MAP, 14)
            oprot.writeMapBegin(TType.I32, TType.LIST, len(self.my_enum_structlist_map))
            for kiter101, viter102 in self.my_enum_structlist_map.items():
                oprot.writeI32(kiter101)
                oprot.writeListBegin(TType.STRUCT, len(viter102))
                for iter103 in viter102:
                    iter103.write(oprot)
                oprot.writeListEnd()
            oprot.writeMapEnd()
            oprot.writeFieldEnd()
        if self.my_stringlist is not None:
            oprot.writeFieldBegin('my_stringlist', TType.LIST, 15)
            oprot.writeListBegin(TType.STRING, len(self.my_stringlist))
            for iter104 in self.my_stringlist:
                oprot.writeString(iter104.encode('utf-8') if sys.version_info[0] == 2 else iter104)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.my_structlist is not None:
            oprot.writeFieldBegin('my_structlist', TType.LIST, 16)
            oprot.writeListBegin(TType.STRUCT, len(self.my_structlist))
            for iter105 in self.my_structlist:
                iter105.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.my_enumlist is not None:
            oprot.writeFieldBegin('my_enumlist', TType.LIST, 17)
            oprot.writeListBegin(TType.I32, len(self.my_enumlist))
            for iter106 in self.my_enumlist:
                oprot.writeI32(iter106)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.my_stringset is not None:
            oprot.writeFieldBegin('my_stringset', TType.SET, 18)
            oprot.writeSetBegin(TType.STRING, len(self.my_stringset))
            for iter107 in self.my_stringset:
                oprot.writeString(iter107.encode('utf-8') if sys.version_info[0] == 2 else iter107)
            oprot.writeSetEnd()
            oprot.writeFieldEnd()
        if self.my_enumset is not None:
            oprot.writeFieldBegin('my_enumset', TType.SET, 19)
            oprot.writeSetBegin(TType.I32, len(self.my_enumset))
            for iter108 in self.my_enumset:
                oprot.writeI32(iter108)
            oprot.writeSetEnd()
            oprot.writeFieldEnd()
        if self.my_structset is not None:
            oprot.writeFieldBegin('my_structset', TType.SET, 20)
            oprot.writeSetBegin(TType.STRUCT, len(self.my_structset))
            for iter109 in self.my_structset:
                iter109.write(oprot)
            oprot.writeSetEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(MiniStruct)
MiniStruct.thrift_spec = (
    None, # 0
    (1, TType.STRING, 'my_string', 'UTF8', None, ), # 1
    (2, TType.I32, 'my_enum', None, None, ), # 2
)
all_structs.append(MegaStruct)
MegaStruct.thrift_spec = (
    None, # 0
    (1, TType.BOOL, 'my_bool', None, None, ), # 1
    (2, TType.BYTE, 'my_byte', None, None, ), # 2
    (3, TType.I16, 'my_16bit_int', None, None, ), # 3
    (4, TType.I32, 'my_32bit_int', None, None, ), # 4
    (5, TType.I64, 'my_64bit_int', None, None, ), # 5
    (6, TType.DOUBLE, 'my_double', None, None, ), # 6
    (7, TType.STRING, 'my_string', 'UTF8', None, ), # 7
    (8, TType.STRING, 'my_binary', 'BINARY', None, ), # 8
    (9, TType.MAP, 'my_string_string_map', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 9
    (10, TType.MAP, 'my_string_enum_map', (TType.STRING, 'UTF8', TType.I32, None, False), None, ), # 10
    (11, TType.MAP, 'my_enum_string_map', (TType.I32, None, TType.STRING, 'UTF8', False), None, ), # 11
    (12, TType.MAP, 'my_enum_struct_map', (TType.I32, None, TType.STRUCT, [MiniStruct, None], False), None, ), # 12
    (13, TType.MAP, 'my_enum_stringlist_map', (TType.I32, None, TType.LIST, (TType.STRING, 'UTF8', False), False), None, ), # 13
    (14, TType.MAP, 'my_enum_structlist_map', (TType.I32, None, TType.LIST, (TType.STRUCT, [MiniStruct, None], False), False), None, ), # 14
    (15, TType.LIST, 'my_stringlist', (TType.STRING, 'UTF8', False), None, ), # 15
    (16, TType.LIST, 'my_structlist', (TType.STRUCT, [MiniStruct, None], False), None, ), # 16
    (17, TType.LIST, 'my_enumlist', (TType.I32, None, False), None, ), # 17
    (18, TType.SET, 'my_stringset', (TType.STRING, 'UTF8', False), None, ), # 18
    (19, TType.SET, 'my_enumset', (TType.I32, None, False), None, ), # 19
    (20, TType.SET, 'my_structset', (TType.STRUCT, [MiniStruct, None], False), None, ), # 20
)
fix_spec(all_structs)
del all_structs
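
Since the generated read/write methods only need a transport and a protocol, a quick round trip through an in-memory buffer makes a handy smoke test. A minimal sketch, assuming it runs where MiniStruct and MyEnum are in scope (the transport and protocol calls are the standard thrift Python runtime):

from thrift.protocol import TBinaryProtocol
from thrift.transport import TTransport

# Serialize a MiniStruct into an in-memory transport.
write_buf = TTransport.TMemoryBuffer()
MiniStruct(my_string='hello', my_enum=MyEnum.LLAMA).write(
    TBinaryProtocol.TBinaryProtocol(write_buf))

# Deserialize the raw bytes back into a fresh instance.
read_buf = TTransport.TMemoryBuffer(write_buf.getvalue())
decoded = MiniStruct()
decoded.read(TBinaryProtocol.TBinaryProtocol(read_buf))
print(decoded)  # MiniStruct(my_string='hello', my_enum=1)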
| 43.680608
| 430
| 0.550531
| 2,549
| 22,976
| 4.70969
| 0.105139
| 0.057976
| 0.022491
| 0.035985
| 0.596668
| 0.480966
| 0.371512
| 0.316868
| 0.247813
| 0.237984
| 0
| 0.045433
| 0.346666
| 22,976
| 525
| 431
| 43.76381
| 0.754314
| 0.025244
| 0
| 0.396963
| 1
| 0
| 0.034345
| 0.003956
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030369
| false
| 0
| 0.010846
| 0.013015
| 0.08243
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0ab4aa21cfd4871d1766355bdd0923074d0f5c05
| 32,515
|
py
|
Python
|
gpMgmt/bin/gpload_test/gpload2/TEST.py
|
Tylarb/gpdb
|
15e1341cfbac7f70d2086a9a1d46149a82765b5e
|
[
"PostgreSQL",
"Apache-2.0"
] | 1
|
2020-07-08T13:20:27.000Z
|
2020-07-08T13:20:27.000Z
|
gpMgmt/bin/gpload_test/gpload2/TEST.py
|
Tylarb/gpdb
|
15e1341cfbac7f70d2086a9a1d46149a82765b5e
|
[
"PostgreSQL",
"Apache-2.0"
] | 6
|
2020-06-24T18:56:06.000Z
|
2022-02-26T08:53:11.000Z
|
gpMgmt/bin/gpload_test/gpload2/TEST.py
|
Tylarb/gpdb
|
15e1341cfbac7f70d2086a9a1d46149a82765b5e
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python

import unittest
import sys
import os
import string
import time
import socket
import fileinput
import platform
import re
try:
    import subprocess32 as subprocess
except:
    import subprocess
import pg

def get_port_from_conf():
    file = os.environ.get('MASTER_DATA_DIRECTORY')+'/postgresql.conf'
    if os.path.isfile(file):
        with open(file) as f:
            for line in f.xreadlines():
                match = re.search('port=\d+',line)
                if match:
                    match1 = re.search('\d+', match.group())
                    if match1:
                        return match1.group()

def get_port():
    port = os.environ['PGPORT']
    if not port:
        port = get_port_from_conf()
    return port if port else 5432

def get_ip(hostname=None):
    if hostname is None:
        hostname = socket.gethostname()
    else:
        hostname = hostname
    hostinfo = socket.getaddrinfo(hostname, None)
    ipaddrlist = list(set([(ai[4][0]) for ai in hostinfo]))
    for myip in ipaddrlist:
        if myip.find(":") > 0:
            ipv6 = myip
            return ipv6
        elif myip.find(".") > 0:
            ipv4 = myip
            return ipv4

def getPortMasterOnly(host = 'localhost', master_value = None,
                      user = os.environ.get('USER'), gphome = os.environ['GPHOME'],
                      mdd = os.environ['MASTER_DATA_DIRECTORY'], port = os.environ['PGPORT']):
    master_pattern = "Context:\s*-1\s*Value:\s*\d+"
    command = "gpconfig -s %s" % ( "port" )
    cmd = "source %s/greenplum_path.sh; export MASTER_DATA_DIRECTORY=%s; export PGPORT=%s; %s" \
          % (gphome, mdd, port, command)
    (ok,out) = run(cmd)
    if not ok:
        raise Exception("Unable to connect to segment server %s as user %s" % (host, user))
    for line in out:
        out = line.split('\n')
    for line in out:
        if re.search(master_pattern, line):
            master_value = int(line.split()[3].strip())
    if master_value is None:
        error_msg = "".join(out)
        raise Exception(error_msg)
    return str(master_value)

"""
Global Values
"""
MYD = os.path.abspath(os.path.dirname(__file__))
mkpath = lambda *x: os.path.join(MYD, *x)
UPD = os.path.abspath(mkpath('..'))
if UPD not in sys.path:
    sys.path.append(UPD)

DBNAME = "postgres"
USER = os.environ.get( "LOGNAME" )
HOST = socket.gethostname()
GPHOME = os.getenv("GPHOME")
PGPORT = get_port()
PGUSER = os.environ.get("PGUSER")
if PGUSER is None:
    PGUSER = USER
PGHOST = os.environ.get("PGHOST")
if PGHOST is None:
    PGHOST = HOST

d = mkpath('config')
if not os.path.exists(d):
    os.mkdir(d)
def write_config_file(mode='insert', reuse_flag='',columns_flag='0',mapping='0',portNum='8081',database='reuse_gptest',host='localhost',formatOpts='text',file='data/external_file_01.txt',table='texttable',format='text',delimiter="'|'",escape='',quote='',truncate='False',log_errors=None, error_limit='0',error_table=None,externalSchema=None,staging_table=None,fast_match='false', encoding=None, preload=True, fill=False):
f = open(mkpath('config/config_file'),'w')
f.write("VERSION: 1.0.0.1")
if database:
f.write("\nDATABASE: "+database)
f.write("\nUSER: "+os.environ.get('USER'))
f.write("\nHOST: "+hostNameAddrs)
f.write("\nPORT: "+masterPort)
f.write("\nGPLOAD:")
f.write("\n INPUT:")
f.write("\n - SOURCE:")
f.write("\n LOCAL_HOSTNAME:")
f.write("\n - "+hostNameAddrs)
if portNum:
f.write("\n PORT: "+portNum)
f.write("\n FILE:")
f.write("\n - "+mkpath(file))
if columns_flag=='1':
f.write("\n - COLUMNS:")
f.write("\n - s_s1: text")
f.write("\n - s_s2: text")
f.write("\n - s_dt: timestamp")
f.write("\n - s_s3: text")
f.write("\n - s_n1: smallint")
f.write("\n - s_n2: integer")
f.write("\n - s_n3: bigint")
f.write("\n - s_n4: decimal")
f.write("\n - s_n5: numeric")
f.write("\n - s_n6: real")
f.write("\n - s_n7: double precision")
f.write("\n - s_n8: text")
f.write("\n - s_n9: text")
if format:
f.write("\n - FORMAT: "+format)
if log_errors:
f.write("\n - LOG_ERRORS: true")
f.write("\n - ERROR_LIMIT: " + error_limit)
if error_table:
f.write("\n - ERROR_TABLE: " + error_table)
f.write("\n - ERROR_LIMIT: " + error_limit)
if delimiter:
f.write("\n - DELIMITER: "+delimiter)
if encoding:
f.write("\n - ENCODING: "+encoding)
if escape:
f.write("\n - ESCAPE: "+escape)
if quote:
f.write("\n - QUOTE: "+quote)
if fill:
f.write("\n - FILL_MISSING_FIELDS: true")
f.write("\n OUTPUT:")
f.write("\n - TABLE: "+table)
if mode:
if mode == 'insert':
f.write("\n - MODE: "+'insert')
if mode == 'update':
f.write("\n - MODE: "+'update')
if mode == 'merge':
f.write("\n - MODE: "+'merge')
f.write("\n - UPDATE_COLUMNS:")
f.write("\n - n2")
f.write("\n - MATCH_COLUMNS:")
f.write("\n - n1")
f.write("\n - s1")
f.write("\n - s2")
if mapping=='1':
f.write("\n - MAPPING:")
f.write("\n s1: s_s1")
f.write("\n s2: s_s2")
f.write("\n dt: s_dt")
f.write("\n s3: s_s3")
f.write("\n n1: s_n1")
f.write("\n n2: s_n2")
f.write("\n n3: s_n3")
f.write("\n n4: s_n4")
f.write("\n n5: s_n5")
f.write("\n n6: s_n6")
f.write("\n n7: s_n7")
f.write("\n n8: s_n8")
f.write("\n n9: s_n9")
if externalSchema:
f.write("\n EXTERNAL:")
f.write("\n - SCHEMA: "+externalSchema)
if preload:
f.write("\n PRELOAD:")
f.write("\n - REUSE_TABLES: "+reuse_flag)
f.write("\n - FAST_MATCH: "+fast_match)
if staging_table:
f.write("\n - STAGING_TABLE: "+staging_table)
f.write("\n")
f.close()
def runfile(ifile, flag='', dbname=None, outputPath="", outputFile="",
username=None,
PGOPTIONS=None, host = None, port = None):
if len(outputFile) == 0:
(ok, out) = psql_run(ifile = ifile,ofile = outFile(ifile, outputPath),flag = flag,
dbname=dbname , username=username,
PGOPTIONS=PGOPTIONS, host = host, port = port)
else:
(ok,out) = psql_run(ifile =ifile, ofile =outFile(outputFile, outputPath), flag =flag,
dbname= dbname, username= username,
PGOPTIONS= PGOPTIONS, host = host, port = port)
return (ok, out)
def psql_run(ifile = None, ofile = None, cmd = None,
flag = '-e',dbname = None,
username = None,
PGOPTIONS = None, host = None, port = None):
'''
Run a command or file against psql. Return True if OK.
@param dbname: database name
@param ifile: input file
@param cmd: command line
@param flag: -e Run SQL with no comments (default)
-a Run SQL with comments and psql notice
@param username: psql user
@param host : to connect to a different host
@param port : port where gpdb is running
@param PGOPTIONS: connects to postgres via utility mode
'''
if dbname is None:
dbname = DBNAME
if username is None:
username = PGUSER # Use the default login user
if PGOPTIONS is None:
PGOPTIONS = ""
else:
PGOPTIONS = "PGOPTIONS='%s'" % PGOPTIONS
if host is None:
host = "-h %s" % PGHOST
else:
host = "-h %s" % host
if port is None:
port = ""
else:
port = "-p %s" % port
if cmd:
arg = '-c "%s"' % cmd
elif ifile:
arg = ' < ' + ifile
if not (flag == '-q'): # Don't echo commands sent to server
arg = '-e < ' + ifile
if flag == '-a':
arg = '-f ' + ifile
else:
raise PSQLError('missing cmd and ifile')
if ofile == '-':
ofile = '2>&1'
elif not ofile:
ofile = '> /dev/null 2>&1'
else:
ofile = '> %s 2>&1' % ofile
return run('%s psql -d %s %s %s -U %s %s %s %s' %
(PGOPTIONS, dbname, host, port, username, flag, arg, ofile))
def run(cmd):
"""
Run a shell command. Return (True, [result]) if OK, or (False, []) otherwise.
@params cmd: The command to run at the shell.
oFile: an optional output file.
mode: What to do if the output file already exists: 'a' = append;
'w' = write. Defaults to append (so that the function is
backwards compatible). Yes, this is passed to the open()
function, so you can theoretically pass any value that is
valid for the second parameter of open().
"""
p = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
out = p.communicate()[0]
ret = []
ret.append(out)
rc = False if p.wait() else True
return (rc,ret)
def outFile(fname,outputPath = ''):
return changeExtFile(fname, ".out", outputPath)
def diffFile( fname, outputPath = "" ):
return changeExtFile( fname, ".diff", outputPath )
def changeExtFile( fname, ext = ".diff", outputPath = "" ):
if len( outputPath ) == 0:
return os.path.splitext( fname )[0] + ext
else:
filename = fname.split( "/" )
fname = os.path.splitext( filename[len( filename ) - 1] )[0]
return outputPath + "/" + fname + ext
def gpdbAnsFile(fname):
ext = '.ans'
return os.path.splitext(fname)[0] + ext
def isFileEqual( f1, f2, optionalFlags = "", outputPath = "", myinitfile = ""):
LMYD = os.path.abspath(os.path.dirname(__file__))
if not os.access( f1, os.R_OK ):
raise Exception( 'Error: cannot find file %s' % f1 )
if not os.access( f2, os.R_OK ):
raise Exception( 'Error: cannot find file %s' % f2 )
dfile = diffFile( f1, outputPath = outputPath )
# Gets the suitePath name to add init_file
suitePath = f1[0:f1.rindex( "/" )]
if os.path.exists(suitePath + "/init_file"):
(ok, out) = run('../gpdiff.pl -w ' + optionalFlags + \
' -I NOTICE: -I HINT: -I CONTEXT: -I GP_IGNORE: --gp_init_file=%s/global_init_file --gp_init_file=%s/init_file '
'%s %s > %s 2>&1' % (LMYD, suitePath, f1, f2, dfile))
else:
if os.path.exists(myinitfile):
(ok, out) = run('../gpdiff.pl -w ' + optionalFlags + \
' -I NOTICE: -I HINT: -I CONTEXT: -I GP_IGNORE: --gp_init_file=%s/global_init_file --gp_init_file=%s '
'%s %s > %s 2>&1' % (LMYD, myinitfile, f1, f2, dfile))
else:
(ok, out) = run( '../gpdiff.pl -w ' + optionalFlags + \
' -I NOTICE: -I HINT: -I CONTEXT: -I GP_IGNORE: --gp_init_file=%s/global_init_file '
'%s %s > %s 2>&1' % ( LMYD, f1, f2, dfile ) )
if ok:
os.unlink( dfile )
return ok
def read_diff(ifile, outputPath):
"""
Opens the diff file that is associated with the given input file and returns
its contents as a string.
"""
dfile = diffFile(ifile, outputPath)
with open(dfile, 'r') as diff:
return diff.read()
def modify_sql_file(num):
file = mkpath('query%d.sql' % num)
user = os.environ.get('USER')
if not user:
user = os.environ.get('USER')
if os.path.isfile(file):
for line in fileinput.FileInput(file,inplace=1):
line = line.replace("gpload.py ","gpload ")
print str(re.sub('\n','',line))
def copy_data(source='',target=''):
cmd = 'cp '+ mkpath('data/' + source) + ' ' + mkpath(target)
p = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
return p.communicate()
hostNameAddrs = get_ip(HOST)
masterPort = getPortMasterOnly()
def get_table_name():
try:
db = pg.DB(dbname='reuse_gptest'
,host='localhost'
,port=int(PGPORT)
)
except Exception,e:
errorMessage = str(e)
print 'could not connect to database: ' + errorMessage
queryString = """SELECT relname
from pg_class
WHERE relname
like 'ext_gpload_reusable%'
OR relname
like 'staging_gpload_reusable%';"""
resultList = db.query(queryString.encode('utf-8')).getresult()
return resultList
def drop_tables():
try:
db = pg.DB(dbname='reuse_gptest'
,host='localhost'
,port=int(PGPORT)
)
except Exception,e:
errorMessage = str(e)
print 'could not connect to database: ' + errorMessage
list = get_table_name()
for i in list:
name = i[0]
match = re.search('ext_gpload',name)
if match:
queryString = "DROP EXTERNAL TABLE %s" % name
db.query(queryString.encode('utf-8'))
else:
queryString = "DROP TABLE %s" % name
db.query(queryString.encode('utf-8'))
class PSQLError(Exception):
'''
PSQLError is the base class for exceptions in this module
http://docs.python.org/tutorial/errors.html
We want to raise an error and not a failure. The reason for an error
might be program error, file not found, etc.
Failure is defined as a test case failure, where the output differs
from the expected result.
'''
pass
class GPLoad_FormatOpts_TestCase(unittest.TestCase):
def check_result(self,ifile, optionalFlags = "-U3", outputPath = ""):
"""
PURPOSE: compare the actual and expected output files and report an
error if they don't match.
PARAMETERS:
ifile: the name of the .sql file whose actual and expected outputs
we want to compare. You may include the path as well as the
filename. This function will process this file name to
figure out the proper names of the .out and .ans files.
optionalFlags: command-line options (if any) for diff.
For example, pass " -B " (with the blank spaces) to ignore
blank lines. By default, diffs are unified with 3 lines of
context (i.e. optionalFlags is "-U3").
"""
f1 = gpdbAnsFile(ifile)
f2 = outFile(ifile, outputPath=outputPath)
result = isFileEqual(f1, f2, optionalFlags, outputPath=outputPath)
diff = None if result else read_diff(ifile, outputPath)
self.assertTrue(result, "query resulted in diff:\n{}".format(diff))
return True
def doTest(self, num):
file = mkpath('query%d.diff' % num)
if os.path.isfile(file):
run("rm -f" + " " + file)
modify_sql_file(num)
file = mkpath('query%d.sql' % num)
runfile(file)
self.check_result(file)
def test_00_gpload_formatOpts_setup(self):
"0 gpload setup"
for num in range(1,40):
f = open(mkpath('query%d.sql' % num),'w')
f.write("\! gpload -f "+mkpath('config/config_file')+ " -d reuse_gptest\n"+"\! gpload -f "+mkpath('config/config_file')+ " -d reuse_gptest\n")
f.close()
file = mkpath('setup.sql')
runfile(file)
self.check_result(file)
def test_01_gpload_formatOpts_delimiter(self):
"1 gpload formatOpts delimiter '|' with reuse "
copy_data('external_file_01.txt','data_file.txt')
write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="'|'")
self.doTest(1)
def test_02_gpload_formatOpts_delimiter(self):
"2 gpload formatOpts delimiter '\t' with reuse"
copy_data('external_file_02.txt','data_file.txt')
write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="'\t'")
self.doTest(2)
def test_03_gpload_formatOpts_delimiter(self):
"3 gpload formatOpts delimiter E'\t' with reuse"
copy_data('external_file_02.txt','data_file.txt')
write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="E'\\t'")
self.doTest(3)
def test_04_gpload_formatOpts_delimiter(self):
"4 gpload formatOpts delimiter E'\u0009' with reuse"
copy_data('external_file_02.txt','data_file.txt')
write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="E'\u0009'")
self.doTest(4)
def test_05_gpload_formatOpts_delimiter(self):
"5 gpload formatOpts delimiter E'\\'' with reuse"
copy_data('external_file_03.txt','data_file.txt')
write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="E'\''")
self.doTest(5)
def test_06_gpload_formatOpts_delimiter(self):
"6 gpload formatOpts delimiter \"'\" with reuse"
copy_data('external_file_03.txt','data_file.txt')
write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="\"'\"")
self.doTest(6)
def test_07_gpload_reuse_table_insert_mode_without_reuse(self):
"7 gpload insert mode without reuse"
runfile(mkpath('setup.sql'))
f = open(mkpath('query7.sql'),'a')
f.write("\! psql -d reuse_gptest -c 'select count(*) from texttable;'")
f.close()
write_config_file(mode='insert',reuse_flag='false')
self.doTest(7)
def test_08_gpload_reuse_table_update_mode_with_reuse(self):
"8 gpload update mode with reuse"
drop_tables()
copy_data('external_file_04.txt','data_file.txt')
write_config_file(mode='update',reuse_flag='true',file='data_file.txt')
self.doTest(8)
def test_09_gpload_reuse_table_update_mode_without_reuse(self):
"9 gpload update mode without reuse"
f = open(mkpath('query9.sql'),'a')
f.write("\! psql -d reuse_gptest -c 'select count(*) from texttable;'\n"+"\! psql -d reuse_gptest -c 'select * from texttable where n2=222;'")
f.close()
copy_data('external_file_05.txt','data_file.txt')
write_config_file(mode='update',reuse_flag='false',file='data_file.txt')
self.doTest(9)
def test_10_gpload_reuse_table_merge_mode_with_reuse(self):
"10 gpload merge mode with reuse "
drop_tables()
copy_data('external_file_06.txt','data_file.txt')
write_config_file('merge','true',file='data_file.txt')
self.doTest(10)
def test_11_gpload_reuse_table_merge_mode_without_reuse(self):
"11 gpload merge mode without reuse "
copy_data('external_file_07.txt','data_file.txt')
write_config_file('merge','false',file='data_file.txt')
self.doTest(11)
def test_12_gpload_reuse_table_merge_mode_with_different_columns_number_in_file(self):
"12 gpload merge mode with reuse (RERUN with different columns number in file) "
psql_run(cmd="ALTER TABLE texttable ADD column n8 text",dbname='reuse_gptest')
copy_data('external_file_08.txt','data_file.txt')
write_config_file('merge','true',file='data_file.txt')
self.doTest(12)
def test_13_gpload_reuse_table_merge_mode_with_different_columns_number_in_DB(self):
"13 gpload merge mode with reuse (RERUN with different columns number in DB table) "
preTest = mkpath('pre_test_13.sql')
psql_run(preTest, dbname='reuse_gptest')
copy_data('external_file_09.txt','data_file.txt')
write_config_file('merge','true',file='data_file.txt')
self.doTest(13)
def test_14_gpload_reuse_table_update_mode_with_reuse_RERUN(self):
"14 gpload update mode with reuse (RERUN) "
write_config_file('update','true',file='data_file.txt')
self.doTest(14)
def test_15_gpload_reuse_table_merge_mode_with_different_columns_order(self):
"15 gpload merge mode with different columns' order "
copy_data('external_file_10.txt','data/data_file.tbl')
write_config_file('merge','true',file='data/data_file.tbl',columns_flag='1',mapping='1')
self.doTest(15)
def test_16_gpload_formatOpts_quote(self):
"16 gpload formatOpts quote unspecified in CSV with reuse "
copy_data('external_file_11.csv','data_file.csv')
write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','")
self.doTest(16)
def test_17_gpload_formatOpts_quote(self):
"17 gpload formatOpts quote '\\x26'(&) with reuse"
copy_data('external_file_12.csv','data_file.csv')
write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",quote="'\x26'")
self.doTest(17)
def test_18_gpload_formatOpts_quote(self):
"18 gpload formatOpts quote E'\\x26'(&) with reuse"
copy_data('external_file_12.csv','data_file.csv')
write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",quote="E'\x26'")
self.doTest(18)
def test_19_gpload_formatOpts_escape(self):
"19 gpload formatOpts escape '\\' with reuse"
copy_data('external_file_01.txt','data_file.txt')
file = mkpath('setup.sql')
runfile(file)
write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',escape='\\')
self.doTest(19)
def test_20_gpload_formatOpts_escape(self):
"20 gpload formatOpts escape '\\' with reuse"
copy_data('external_file_01.txt','data_file.txt')
write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',escape= '\x5C')
self.doTest(20)
def test_21_gpload_formatOpts_escape(self):
"21 gpload formatOpts escape E'\\\\' with reuse"
copy_data('external_file_01.txt','data_file.txt')
write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',escape="E'\\\\'")
self.doTest(21)
# Case 22 is flaky on Concourse. It may report "Fatal Python error: GC object already tracked" during testing.
# The issue is rare and we cannot reproduce it locally, so the case is disabled to avoid blocking others.
#def test_22_gpload_error_count(self):
# "22 gpload error count"
# f = open(mkpath('query22.sql'),'a')
# f.write("\! psql -d reuse_gptest -c 'select count(*) from csvtable;'")
# f.close()
# f = open(mkpath('data/large_file.csv'),'w')
# for i in range(0, 10000):
# if i % 2 == 0:
# f.write('1997,Ford,E350,"ac, abs, moon",3000.00,a\n')
# else:
# f.write('1997,Ford,E350,"ac, abs, moon",3000.00\n')
# f.close()
# copy_data('large_file.csv','data_file.csv')
# write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",log_errors=True,error_limit='90000000')
# self.doTest(22)
def test_23_gpload_error_count(self):
"23 gpload error_table"
file = mkpath('setup.sql')
runfile(file)
f = open(mkpath('query23.sql'),'a')
f.write("\! psql -d reuse_gptest -c 'select count(*) from csvtable;'")
f.close()
f = open(mkpath('data/large_file.csv'),'w')
for i in range(0, 10000):
if i % 2 == 0:
f.write('1997,Ford,E350,"ac, abs, moon",3000.00,a\n')
else:
f.write('1997,Ford,E350,"ac, abs, moon",3000.00\n')
f.close()
copy_data('large_file.csv','data_file.csv')
write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",error_table="err_table",error_limit='90000000')
self.doTest(23)
def test_24_gpload_error_count(self):
"24 gpload error count with ext schema"
file = mkpath('setup.sql')
runfile(file)
f = open(mkpath('query24.sql'),'a')
f.write("\! psql -d reuse_gptest -c 'select count(*) from csvtable;'")
f.close()
f = open(mkpath('data/large_file.csv'),'w')
for i in range(0, 10000):
if i % 2 == 0:
f.write('1997,Ford,E350,"ac, abs, moon",3000.00,a\n')
else:
f.write('1997,Ford,E350,"ac, abs, moon",3000.00\n')
f.close()
copy_data('large_file.csv','data_file.csv')
write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",log_errors=True,error_limit='90000000',externalSchema='test')
self.doTest(24)
def test_25_gpload_ext_staging_table(self):
"25 gpload reuse ext_staging_table if it is configured"
file = mkpath('setup.sql')
runfile(file)
f = open(mkpath('query25.sql'),'a')
f.write("\! psql -d reuse_gptest -c 'select count(*) from csvtable;'")
f.close()
copy_data('external_file_13.csv','data_file.csv')
write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",log_errors=True,error_limit='10',staging_table='staging_table')
self.doTest(25)
def test_26_gpload_ext_staging_table_with_externalschema(self):
"26 gpload reuse ext_staging_table if it is configured with externalschema"
file = mkpath('setup.sql')
runfile(file)
f = open(mkpath('query26.sql'),'a')
f.write("\! psql -d reuse_gptest -c 'select count(*) from csvtable;'")
f.close()
copy_data('external_file_13.csv','data_file.csv')
write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",log_errors=True,error_limit='10',staging_table='staging_table',externalSchema='test')
self.doTest(26)
def test_27_gpload_ext_staging_table_with_externalschema(self):
"27 gpload reuse ext_staging_table if it is configured with externalschema"
file = mkpath('setup.sql')
runfile(file)
f = open(mkpath('query27.sql'),'a')
f.write("\! psql -d reuse_gptest -c 'select count(*) from test.csvtable;'")
f.close()
copy_data('external_file_13.csv','data_file.csv')
write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='test.csvtable',format='csv',delimiter="','",log_errors=True,error_limit='10',staging_table='staging_table',externalSchema="'%'")
self.doTest(27)
def test_28_gpload_ext_staging_table_with_dot(self):
"28 gpload reuse ext_staging_table if it is configured with dot"
file = mkpath('setup.sql')
runfile(file)
f = open(mkpath('query28.sql'),'a')
f.write("\! psql -d reuse_gptest -c 'select count(*) from test.csvtable;'")
f.close()
copy_data('external_file_13.csv','data_file.csv')
write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='test.csvtable',format='csv',delimiter="','",log_errors=True,error_limit='10',staging_table='t.staging_table')
self.doTest(28)
def test_29_gpload_reuse_table_insert_mode_with_reuse_and_null(self):
"29 gpload insert mode with reuse and null"
runfile(mkpath('setup.sql'))
f = open(mkpath('query29.sql'),'a')
f.write("\! psql -d reuse_gptest -c 'select count(*) from texttable where n2 is null;'")
f.close()
copy_data('external_file_14.txt','data_file.txt')
write_config_file(mode='insert',reuse_flag='true',file='data_file.txt',log_errors=True, error_limit='100')
self.doTest(29)
def test_30_gpload_reuse_table_update_mode_with_fast_match(self):
"30 gpload update mode with fast match"
drop_tables()
copy_data('external_file_04.txt','data_file.txt')
write_config_file(mode='update',reuse_flag='true',fast_match='true',file='data_file.txt')
self.doTest(30)
def test_31_gpload_reuse_table_update_mode_with_fast_match_and_different_columns_number(self):
"31 gpload update mode with fast match and differenct columns number) "
psql_run(cmd="ALTER TABLE texttable ADD column n8 text",dbname='reuse_gptest')
copy_data('external_file_08.txt','data_file.txt')
write_config_file(mode='update',reuse_flag='true',fast_match='true',file='data_file.txt')
self.doTest(31)
def test_32_gpload_update_mode_without_reuse_table_with_fast_match(self):
"32 gpload update mode when reuse table is false and fast match is true"
drop_tables()
copy_data('external_file_08.txt','data_file.txt')
write_config_file(mode='update',reuse_flag='false',fast_match='true',file='data_file.txt')
self.doTest(32)
def test_33_gpload_reuse_table_merge_mode_with_fast_match_and_external_schema(self):
"33 gpload update mode with fast match and external schema"
file = mkpath('setup.sql')
runfile(file)
copy_data('external_file_04.txt','data_file.txt')
write_config_file(mode='merge',reuse_flag='true',fast_match='true',file='data_file.txt',externalSchema='test')
self.doTest(33)
def test_34_gpload_reuse_table_merge_mode_with_fast_match_and_encoding(self):
"34 gpload merge mode with fast match and encoding GBK"
file = mkpath('setup.sql')
runfile(file)
copy_data('external_file_04.txt','data_file.txt')
write_config_file(mode='merge',reuse_flag='true',fast_match='true',file='data_file.txt',encoding='GBK')
self.doTest(34)
def test_35_gpload_reuse_table_merge_mode_with_fast_match_default_encoding(self):
"35 gpload does not reuse table when encoding is setted from GBK to empty"
write_config_file(mode='merge',reuse_flag='true',fast_match='true',file='data_file.txt')
self.doTest(35)
def test_36_gpload_reuse_table_merge_mode_default_encoding(self):
"36 gpload merge mode with encoding GBK"
file = mkpath('setup.sql')
runfile(file)
copy_data('external_file_04.txt','data_file.txt')
write_config_file(mode='merge',reuse_flag='true',fast_match='false',file='data_file.txt',encoding='GBK')
self.doTest(36)
def test_37_gpload_reuse_table_merge_mode_invalid_encoding(self):
"37 gpload merge mode with invalid encoding"
file = mkpath('setup.sql')
runfile(file)
copy_data('external_file_04.txt','data_file.txt')
write_config_file(mode='merge',reuse_flag='true',fast_match='false',file='data_file.txt',encoding='xxxx')
self.doTest(37)
def test_38_gpload_without_preload(self):
"38 gpload insert mode without preload"
file = mkpath('setup.sql')
runfile(file)
copy_data('external_file_04.txt','data_file.txt')
write_config_file(mode='insert',reuse_flag='true',fast_match='false',file='data_file.txt',error_table="err_table",error_limit='1000',preload=False)
self.doTest(38)
def test_39_gpload_fill_missing_fields(self):
"39 gpload fill missing fields"
file = mkpath('setup.sql')
runfile(file)
copy_data('external_file_04.txt','data_file.txt')
write_config_file(mode='insert',reuse_flag='false',fast_match='false',file='data_file.txt',table='texttable1', error_limit='1000', fill=True)
self.doTest(39)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(GPLoad_FormatOpts_TestCase)
runner = unittest.TextTestRunner(verbosity=2)
ret = not runner.run(suite).wasSuccessful()
sys.exit(ret)
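
The driver at the bottom of the file is plain unittest plumbing. A minimal sketch of the same pattern with a hypothetical stand-in case (the loader/runner calls are the stdlib API used above):

import unittest

class DemoCase(unittest.TestCase):
    def test_smoke(self):
        self.assertTrue(True)

if __name__ == '__main__':
    suite = unittest.TestLoader().loadTestsFromTestCase(DemoCase)
    runner = unittest.TextTestRunner(verbosity=2)
    # Exit code 0 on success, 1 on failure, like the gpload suite.
    raise SystemExit(not runner.run(suite).wasSuccessful())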
| 42.172503
| 421
| 0.611041
| 4,386
| 32,515
| 4.347697
| 0.120383
| 0.02706
| 0.023127
| 0.034611
| 0.524411
| 0.480518
| 0.454612
| 0.416487
| 0.386019
| 0.359589
| 0
| 0.024738
| 0.249085
| 32,515
| 770
| 422
| 42.227273
| 0.756266
| 0.030909
| 0
| 0.257282
| 0
| 0.006472
| 0.291934
| 0.010893
| 0
| 0
| 0
| 0
| 0.001618
| 0
| null | null | 0.001618
| 0.019417
| null | null | 0.004854
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0ab8196f812a9bd1c5cff6d84c43cd3a82467a55
| 618
|
py
|
Python
|
VMI/VMItest.py
|
thomasbarillot/DAQ
|
20126655f74194757d25380680af9429ff27784e
|
[
"MIT"
] | 1
|
2017-04-25T10:56:01.000Z
|
2017-04-25T10:56:01.000Z
|
VMI/VMItest.py
|
thomasbarillot/DAQ
|
20126655f74194757d25380680af9429ff27784e
|
[
"MIT"
] | null | null | null |
VMI/VMItest.py
|
thomasbarillot/DAQ
|
20126655f74194757d25380680af9429ff27784e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat May 7 11:38:18 2016
@author: thomasbarillot
VMI control
"""
from ctypes import cdll
#slib="VMIcrtl_ext.dll"
#hlib=cdll('VMIcrtl.dll')
import VMIcrtl_ext
test=VMIcrtl_ext.VMIcrtl()
#%%
print test.GetFilename()
#%%
test.setFilename('20161115_1841.dat')
print test.GetFilename()
#%%
test.StartAcquisitionPrev()
#%%
test.StopAcquisition()
#%%
img=test.RecallImagePrev()
#%%
import numpy as np
print np.shape(img)
a=np.array(img)
print a
#%%
from matplotlib import pyplot as plt
#%%
b=np.reshape(a,[400,400])
print b
plt.figure()
plt.pcolor(np.reshape(a,[400,400]))
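
The reshape-and-plot step at the end, rewritten as a self-contained sketch with synthetic data (the real 400x400 image comes from the VMIcrtl driver, which is not available outside the lab):

import numpy as np
from matplotlib import pyplot as plt

# Synthetic stand-in for the flat image list returned by RecallImagePrev().
flat = np.random.rand(400 * 400)
img = np.reshape(flat, [400, 400])
print(np.shape(img))  # (400, 400)

plt.figure()
plt.pcolor(img)  # pcolormesh would be faster for a large regular grid
plt.show()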
| 12.875
| 37
| 0.699029
| 90
| 618
| 4.755556
| 0.555556
| 0.070093
| 0.093458
| 0.11215
| 0.074766
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066543
| 0.124595
| 618
| 48
| 38
| 12.875
| 0.724584
| 0.134304
| 0
| 0.111111
| 0
| 0
| 0.038375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.222222
| null | null | 0.277778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0ab878278314d67f6d0be9f6568f133ce9e1ee76
| 8,119
|
py
|
Python
|
var/spack/repos/builtin/packages/openssl/package.py
|
vitodb/spack
|
b9ab1de4c5f7b21d9f9cb88b7251820a48e82d27
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/openssl/package.py
|
vitodb/spack
|
b9ab1de4c5f7b21d9f9cb88b7251820a48e82d27
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2021-01-11T09:16:43.000Z
|
2021-01-12T20:07:23.000Z
|
var/spack/repos/builtin/packages/openssl/package.py
|
vitodb/spack
|
b9ab1de4c5f7b21d9f9cb88b7251820a48e82d27
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2021-01-06T18:58:26.000Z
|
2021-01-06T18:58:26.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import llnl.util.tty as tty
from spack import *
import spack.architecture
import os
class Openssl(Package): # Uses Fake Autotools, should subclass Package
"""OpenSSL is an open source project that provides a robust,
commercial-grade, and full-featured toolkit for the Transport
Layer Security (TLS) and Secure Sockets Layer (SSL) protocols.
It is also a general-purpose cryptography library."""
homepage = "http://www.openssl.org"
# URL must remain http:// so Spack can bootstrap curl
url = "http://www.openssl.org/source/openssl-1.1.1d.tar.gz"
list_url = "http://www.openssl.org/source/old/"
list_depth = 1
# The latest stable version is the 1.1.1 series. This is also our Long Term
# Support (LTS) version, supported until 11th September 2023.
version('1.1.1g', sha256='ddb04774f1e32f0c49751e21b67216ac87852ceb056b75209af2443400636d46')
version('1.1.1f', sha256='186c6bfe6ecfba7a5b48c47f8a1673d0f3b0e5ba2e25602dd23b629975da3f35')
version('1.1.1e', sha256='694f61ac11cb51c9bf73f54e771ff6022b0327a43bbdfa1b2f19de1662a6dcbe')
version('1.1.1d', sha256='1e3a91bc1f9dfce01af26026f856e064eab4c8ee0a8f457b5ae30b40b8b711f2')
version('1.1.1c', sha256='f6fb3079ad15076154eda9413fed42877d668e7069d9b87396d0804fdb3f4c90')
version('1.1.1b', sha256='5c557b023230413dfb0756f3137a13e6d726838ccd1430888ad15bfb2b43ea4b')
version('1.1.1a', sha256='fc20130f8b7cbd2fb918b2f14e2f429e109c31ddd0fb38fc5d71d9ffed3f9f41')
version('1.1.1', sha256='2836875a0f89c03d0fdf483941512613a50cfb421d6fd94b9f41d7279d586a3d')
# The 1.1.0 series is out of support and should not be used.
version('1.1.0l', sha256='74a2f756c64fd7386a29184dc0344f4831192d61dc2481a93a4c5dd727f41148')
version('1.1.0k', sha256='efa4965f4f773574d6cbda1cf874dbbe455ab1c0d4f906115f867d30444470b1')
version('1.1.0j', sha256='31bec6c203ce1a8e93d5994f4ed304c63ccf07676118b6634edded12ad1b3246')
version('1.1.0i', sha256='ebbfc844a8c8cc0ea5dc10b86c9ce97f401837f3fa08c17b2cdadc118253cf99')
version('1.1.0g', sha256='de4d501267da39310905cb6dc8c6121f7a2cad45a7707f76df828fe1b85073af')
version('1.1.0e', sha256='57be8618979d80c910728cfc99369bf97b2a1abd8f366ab6ebdee8975ad3874c')
version('1.1.0d', sha256='7d5ebb9e89756545c156ff9c13cf2aa6214193b010a468a3bc789c3c28fe60df')
version('1.1.0c', sha256='fc436441a2e05752d31b4e46115eb89709a28aef96d4fe786abe92409b2fd6f5')
# The 1.0.2 series is out of support and should not be used.
version('1.0.2u', sha256='ecd0c6ffb493dd06707d38b14bb4d8c2288bb7033735606569d8f90f89669d16')
version('1.0.2t', sha256='14cb464efe7ac6b54799b34456bd69558a749a4931ecfd9cf9f71d7881cac7bc')
version('1.0.2s', sha256='cabd5c9492825ce5bd23f3c3aeed6a97f8142f606d893df216411f07d1abab96')
version('1.0.2r', sha256='ae51d08bba8a83958e894946f15303ff894d75c2b8bbd44a852b64e3fe11d0d6')
version('1.0.2p', sha256='50a98e07b1a89eb8f6a99477f262df71c6fa7bef77df4dc83025a2845c827d00')
version('1.0.2o', sha256='ec3f5c9714ba0fd45cb4e087301eb1336c317e0d20b575a125050470e8089e4d')
version('1.0.2n', sha256='370babb75f278c39e0c50e8c4e7493bc0f18db6867478341a832a982fd15a8fe')
version('1.0.2m', sha256='8c6ff15ec6b319b50788f42c7abc2890c08ba5a1cdcd3810eb9092deada37b0f')
version('1.0.2k', sha256='6b3977c61f2aedf0f96367dcfb5c6e578cf37e7b8d913b4ecb6643c3cb88d8c0')
version('1.0.2j', sha256='e7aff292be21c259c6af26469c7a9b3ba26e9abaaffd325e3dccc9785256c431')
version('1.0.2i', sha256='9287487d11c9545b6efb287cdb70535d4e9b284dd10d51441d9b9963d000de6f')
version('1.0.2h', sha256='1d4007e53aad94a5b2002fe045ee7bb0b3d98f1a47f8b2bc851dcd1c74332919')
version('1.0.2g', sha256='b784b1b3907ce39abf4098702dade6365522a253ad1552e267a9a0e89594aa33')
version('1.0.2f', sha256='932b4ee4def2b434f85435d9e3e19ca8ba99ce9a065a61524b429a9d5e9b2e9c')
version('1.0.2e', sha256='e23ccafdb75cfcde782da0151731aa2185195ac745eea3846133f2e05c0e0bff')
version('1.0.2d', sha256='671c36487785628a703374c652ad2cebea45fa920ae5681515df25d9f2c9a8c8')
# The 1.0.1 version is out of support and should not be used.
version('1.0.1u', sha256='4312b4ca1215b6f2c97007503d80db80d5157f76f8f7d3febbe6b4c56ff26739')
version('1.0.1t', sha256='4a6ee491a2fdb22e519c76fdc2a628bb3cec12762cd456861d207996c8a07088')
version('1.0.1r', sha256='784bd8d355ed01ce98b812f873f8b2313da61df7c7b5677fcf2e57b0863a3346')
version('1.0.1h', sha256='9d1c8a9836aa63e2c6adb684186cbd4371c9e9dcc01d6e3bb447abf2d4d3d093')
version('1.0.1e', sha256='f74f15e8c8ff11aa3d5bb5f276d202ec18d7246e95f961db76054199c69c1ae3')
variant('systemcerts', default=True, description='Use system certificates')
depends_on('zlib')
depends_on('perl@5.14.0:', type=('build', 'test'))
parallel = False
@property
def libs(self):
return find_libraries(['libssl', 'libcrypto'], root=self.prefix.lib)
def handle_fetch_error(self, error):
tty.warn("Fetching OpenSSL failed. This may indicate that OpenSSL has "
"been updated, and the version in your instance of Spack is "
"insecure. Consider updating to the latest OpenSSL version.")
def install(self, spec, prefix):
# OpenSSL uses a variable APPS in its Makefile. If it happens to be set
# in the environment, then this will override what is set in the
# Makefile, leading to build errors.
env.pop('APPS', None)
if str(spec.target.family) in ('x86_64', 'ppc64'):
# This needs to be done for all 64-bit architectures (except Linux,
# where it happens automatically?)
env['KERNEL_BITS'] = '64'
options = ['zlib', 'shared']
if spec.satisfies('@1.0'):
options.append('no-krb5')
# clang does not support the .arch directive in assembly files.
if 'clang' in self.compiler.cc and \
'aarch64' in spack.architecture.sys_type():
options.append('no-asm')
config = Executable('./config')
config('--prefix=%s' % prefix,
'--openssldir=%s' % join_path(prefix, 'etc', 'openssl'),
'-I{0}'.format(self.spec['zlib'].prefix.include),
'-L{0}'.format(self.spec['zlib'].prefix.lib),
*options)
# Remove non-standard compiler options if present. These options are
# present e.g. on Darwin. They are non-standard, i.e. most compilers
# (e.g. gcc) will not accept them.
filter_file(r'-arch x86_64', '', 'Makefile')
make()
if self.run_tests:
make('test') # 'VERBOSE=1'
make('install')
@run_after('install')
def link_system_certs(self):
if '+systemcerts' not in self.spec:
return
system_dirs = [
# CentOS, Fedora, RHEL
'/etc/pki/tls',
# Ubuntu
'/usr/lib/ssl',
# OpenSUSE
'/etc/ssl'
]
pkg_dir = join_path(self.prefix, 'etc', 'openssl')
for directory in system_dirs:
sys_cert = join_path(directory, 'cert.pem')
pkg_cert = join_path(pkg_dir, 'cert.pem')
# If a bundle exists, use it. This is the preferred way on Fedora,
# where the certs directory does not work.
if os.path.exists(sys_cert) and not os.path.exists(pkg_cert):
os.symlink(sys_cert, pkg_cert)
sys_certs = join_path(directory, 'certs')
pkg_certs = join_path(pkg_dir, 'certs')
# If the certs directory exists, symlink it into the package.
# We symlink the whole directory instead of all files because
# the directory contents might change without Spack noticing.
if os.path.isdir(sys_certs) and not os.path.islink(pkg_certs):
os.rmdir(pkg_certs)
os.symlink(sys_certs, pkg_certs)
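A standalone sketch of the certificate-linking fallback above, for readers outside of Spack; the `prefix` argument and system paths are assumptions for illustration, not part of the package API:
import os
from os.path import join, exists, isdir, islink

def link_system_certs(prefix):
    # Mirror host CA certificates into a hypothetical install prefix.
    system_dirs = ['/etc/pki/tls', '/usr/lib/ssl', '/etc/ssl']  # RHEL, Ubuntu, SUSE
    pkg_dir = join(prefix, 'etc', 'openssl')
    for directory in system_dirs:
        # Prefer a single bundle file when the host provides one.
        sys_cert = join(directory, 'cert.pem')
        pkg_cert = join(pkg_dir, 'cert.pem')
        if exists(sys_cert) and not exists(pkg_cert):
            os.symlink(sys_cert, pkg_cert)
        # Otherwise mirror the whole certs directory as a single symlink,
        # so host updates are picked up without Spack noticing.
        sys_certs = join(directory, 'certs')
        pkg_certs = join(pkg_dir, 'certs')
        if isdir(sys_certs) and not islink(pkg_certs):
            if isdir(pkg_certs):
                os.rmdir(pkg_certs)
            os.symlink(sys_certs, pkg_certs)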
| 51.713376
| 96
| 0.711787
| 832
| 8,119
| 6.897837
| 0.419471
| 0.051577
| 0.032933
| 0.008887
| 0.041122
| 0.041122
| 0.023349
| 0.023349
| 0.023349
| 0.023349
| 0
| 0.263539
| 0.18352
| 8,119
| 156
| 97
| 52.044872
| 0.602202
| 0.204828
| 0
| 0
| 0
| 0.010204
| 0.498985
| 0.369942
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040816
| false
| 0
| 0.040816
| 0.010204
| 0.163265
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0ab9be78769ca53a9456cd93a3fd3ab2a85a0c35
| 4,799
|
py
|
Python
|
vispy/util/profiler.py
|
izaid/vispy
|
402cf95bfef88d70c9c45bb27c532ed72944e14a
|
[
"BSD-3-Clause"
] | null | null | null |
vispy/util/profiler.py
|
izaid/vispy
|
402cf95bfef88d70c9c45bb27c532ed72944e14a
|
[
"BSD-3-Clause"
] | null | null | null |
vispy/util/profiler.py
|
izaid/vispy
|
402cf95bfef88d70c9c45bb27c532ed72944e14a
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# Adapted from PyQtGraph
import sys
from . import ptime
from .. import config
class Profiler(object):
"""Simple profiler allowing directed, hierarchical measurement of time
intervals.
By default, profilers are disabled. To enable profiling, set the
environment variable `VISPYPROFILE` to a comma-separated list of
fully-qualified names of profiled functions.
Calling a profiler registers a message (defaulting to an increasing
counter) that contains the time elapsed since the last call. When the
profiler is about to be garbage-collected, the messages are passed to the
outer profiler if one is running, or printed to stdout otherwise.
If `delayed` is set to False, messages are immediately printed instead.
Example:
def function(...):
profiler = Profiler()
... do stuff ...
profiler('did stuff')
... do other stuff ...
profiler('did other stuff')
# profiler is garbage-collected and flushed at function end
If this function is a method of class C, setting `VISPYPROFILE` to
"C.function" (without the module name) will enable this profiler.
For regular functions, use the qualified name of the function, stripping
only the initial "vispy.." prefix from the module.
"""
_profilers = (config['profile'].split(",") if config['profile'] is not None
else [])
_depth = 0
_msgs = []
# set this flag to disable all or individual profilers at runtime
disable = False
class DisabledProfiler(object):
def __init__(self, *args, **kwds):
pass
def __call__(self, *args):
pass
def finish(self):
pass
def mark(self, msg=None):
pass
_disabled_profiler = DisabledProfiler()
def __new__(cls, msg=None, disabled='env', delayed=True):
"""Optionally create a new profiler based on caller's qualname.
"""
if (disabled is True or
(disabled == 'env' and len(cls._profilers) == 0)):
return cls._disabled_profiler
# determine the qualified name of the caller function
caller_frame = sys._getframe(1)
try:
caller_object_type = type(caller_frame.f_locals["self"])
except KeyError: # we are in a regular function
qualifier = caller_frame.f_globals["__name__"].split(".", 1)[1]
else: # we are in a method
qualifier = caller_object_type.__name__
func_qualname = qualifier + "." + caller_frame.f_code.co_name
if (disabled == 'env' and func_qualname not in cls._profilers and
'all' not in cls._profilers): # don't do anything
return cls._disabled_profiler
# create an actual profiling object
cls._depth += 1
obj = super(Profiler, cls).__new__(cls)
obj._name = msg or func_qualname
obj._delayed = delayed
obj._mark_count = 0
obj._finished = False
obj._firstTime = obj._last_time = ptime.time()
obj._new_msg("> Entering " + obj._name)
return obj
def __call__(self, msg=None, *args):
"""Register or print a new message with timing information.
"""
if self.disable:
return
if msg is None:
msg = str(self._mark_count)
self._mark_count += 1
new_time = ptime.time()
elapsed = (new_time - self._last_time) * 1000
self._new_msg(" " + msg + ": %0.4f ms", *(args + (elapsed,)))
self._last_time = new_time
def mark(self, msg=None):
self(msg)
def _new_msg(self, msg, *args):
msg = " " * (self._depth - 1) + msg
if self._delayed:
self._msgs.append((msg, args))
else:
self.flush()
print(msg % args)
def __del__(self):
self.finish()
def finish(self, msg=None):
"""Add a final message; flush the message list if no parent profiler.
"""
if self._finished or self.disable:
return
self._finished = True
if msg is not None:
self(msg)
self._new_msg("< Exiting %s, total time: %0.4f ms",
self._name, (ptime.time() - self._firstTime) * 1000)
type(self)._depth -= 1
if self._depth < 1:
self.flush()
def flush(self):
if self._msgs:
print("\n".join([m[0] % m[1] for m in self._msgs]))
type(self)._msgs = []
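A minimal usage sketch of the class above; the `Canvas.draw` name is a hypothetical target, and `VISPYPROFILE` must be set to "Canvas.draw" before vispy loads its config for the profiler to activate:
class Canvas(object):
    def draw(self):
        profiler = Profiler()    # a no-op stub unless "Canvas.draw" is enabled
        data = sorted(range(1000), reverse=True)
        profiler('sorted data')  # records elapsed ms since the previous mark
        total = sum(data)
        profiler('summed data')
        return total             # marks flush when the profiler is collected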
| 34.52518
| 79
| 0.583663
| 587
| 4,799
| 4.599659
| 0.337308
| 0.018148
| 0.016296
| 0.013333
| 0.028889
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0092
| 0.320483
| 4,799
| 138
| 80
| 34.775362
| 0.818767
| 0.356324
| 0
| 0.202532
| 0
| 0
| 0.034343
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.139241
| false
| 0.050633
| 0.037975
| 0
| 0.329114
| 0.025316
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
0abbc3e1d5afde9470d734d62bcb0511ac93cadd
| 5,390
|
py
|
Python
|
samples/samplenetconf/demos/vr_demo3.py
|
gaberger/pysdn
|
67442e1c259d8ca8620ada95b95977e3852463c5
|
[
"BSD-3-Clause"
] | 1
|
2017-08-22T14:17:10.000Z
|
2017-08-22T14:17:10.000Z
|
samples/samplenetconf/demos/vr_demo3.py
|
gaberger/pysdn
|
67442e1c259d8ca8620ada95b95977e3852463c5
|
[
"BSD-3-Clause"
] | 1
|
2021-03-26T00:47:22.000Z
|
2021-03-26T00:47:22.000Z
|
samples/samplenetconf/demos/vr_demo3.py
|
gaberger/pysdn
|
67442e1c259d8ca8620ada95b95977e3852463c5
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import time
import json
from pysdn.controller.controller import Controller
from pysdn.netconfdev.vrouter.vrouter5600 import VRouter5600
from pysdn.common.status import STATUS
from pysdn.common.utils import load_dict_from_file
def vr_demo_3():
f = "cfg4.yml"
d = {}
if(load_dict_from_file(f, d) is False):
print("Config file '%s' read error: " % f)
exit()
try:
ctrlIpAddr = d['ctrlIpAddr']
ctrlPortNum = d['ctrlPortNum']
ctrlUname = d['ctrlUname']
ctrlPswd = d['ctrlPswd']
nodeName = d['nodeName']
nodeIpAddr = d['nodeIpAddr']
nodePortNum = d['nodePortNum']
nodeUname = d['nodeUname']
nodePswd = d['nodePswd']
rundelay = d['rundelay']
except:
print ("Failed to get Controller device attributes")
exit(0)
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
print ("<<< Demo Start")
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
print ("\n")
ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
vrouter = VRouter5600(ctrl, nodeName, nodeIpAddr, nodePortNum,
nodeUname, nodePswd)
print ("<<< 'Controller': %s, '%s': %s"
% (ctrlIpAddr, nodeName, nodeIpAddr))
print ("\n")
time.sleep(rundelay)
node_configured = False
result = ctrl.check_node_config_status(nodeName)
status = result.get_status()
if(status.eq(STATUS.NODE_CONFIGURED)):
node_configured = True
print ("<<< '%s' is configured on the Controller" % nodeName)
elif(status.eq(STATUS.DATA_NOT_FOUND)):
node_configured = False
else:
print ("\n")
print "Failed to get configuration status for the '%s'" % nodeName
print ("!!!Demo terminated, reason: %s" % status.detailed())
exit(0)
if node_configured is False:
result = ctrl.add_netconf_node(vrouter)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< '%s' added to the Controller" % nodeName)
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.detailed())
exit(0)
print ("\n")
time.sleep(rundelay)
result = ctrl.check_node_conn_status(nodeName)
status = result.get_status()
if(status.eq(STATUS.NODE_CONNECTED)):
print ("<<< '%s' is connected to the Controller" % nodeName)
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print("\n")
print ("<<< Show configuration of the '%s'" % nodeName)
time.sleep(rundelay)
result = vrouter.get_cfg()
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("'%s' configuration:" % nodeName)
cfg = result.get_data()
data = json.loads(cfg)
print (json.dumps(data, indent=4))
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print "\n"
print (">>> Remove '%s' NETCONF node from the Controller" % nodeName)
time.sleep(rundelay)
result = ctrl.delete_netconf_node(vrouter)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("'%s' NETCONF node was successfully removed "
"from the Controller" % nodeName)
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief())
exit(0)
print ("\n")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
print (">>> Demo End")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
if __name__ == "__main__":
vr_demo_3()
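The demo expects `cfg4.yml` to populate the keys below via `load_dict_from_file`; every value here is an illustrative placeholder (RFC 5737 documentation addresses), not a real endpoint:
# hypothetical contents of the dict filled from cfg4.yml
d = {
    'ctrlIpAddr': '192.0.2.10', 'ctrlPortNum': '8181',
    'ctrlUname': 'admin', 'ctrlPswd': 'admin',
    'nodeName': 'vRouter', 'nodeIpAddr': '192.0.2.20',
    'nodePortNum': '830', 'nodeUname': 'vyatta', 'nodePswd': 'vyatta',
    'rundelay': 5,
}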
| 34.113924
| 78
| 0.62115
| 637
| 5,390
| 5.188383
| 0.343799
| 0.01997
| 0.026626
| 0.03177
| 0.288351
| 0.232073
| 0.232073
| 0.232073
| 0.232073
| 0.214523
| 0
| 0.007679
| 0.226902
| 5,390
| 157
| 79
| 34.33121
| 0.785457
| 0.281633
| 0
| 0.425743
| 0
| 0
| 0.254305
| 0.061457
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.059406
| null | null | 0.336634
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0abcc62b08fba05c95b291d22e16bd5e45062b59
| 204
|
py
|
Python
|
Codility/python/tape_equilibrium.py
|
ajeet1308/code_problems
|
5d99839b6319295c6d81dd86775c46a536e7a1ca
|
[
"MIT"
] | 61
|
2020-09-26T19:57:44.000Z
|
2022-03-09T18:51:44.000Z
|
Codility/python/tape_equilibrium.py
|
ajeet1308/code_problems
|
5d99839b6319295c6d81dd86775c46a536e7a1ca
|
[
"MIT"
] | 88
|
2020-09-19T20:00:27.000Z
|
2021-10-31T09:41:57.000Z
|
Codility/python/tape_equilibrium.py
|
ajeet1308/code_problems
|
5d99839b6319295c6d81dd86775c46a536e7a1ca
|
[
"MIT"
] | 218
|
2020-09-20T08:18:03.000Z
|
2022-01-30T23:13:16.000Z
|
def solution(A):
total = sum(A)
m = float('inf')
left_sum = 0
for n in A[:-1]:
left_sum += n
v = abs(total - 2*left_sum)
if v < m:
m = v
return m
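A quick check of the function above; for the classic Codility sample the minimal split difference is 1:
# A = [3, 1, 2, 4, 3]: splitting after index 2 gives |(3+1+2) - (4+3)| = 1
assert solution([3, 1, 2, 4, 3]) == 1
assert solution([1, 2]) == 1  # the only split point gives |1 - 2| = 1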
| 15.692308
| 35
| 0.426471
| 33
| 204
| 2.545455
| 0.575758
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026316
| 0.441176
| 204
| 12
| 36
| 17
| 0.710526
| 0
| 0
| 0
| 0
| 0
| 0.014778
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0abf250849dcb075b82b1ca50e27cc3adefcc742
| 3,993
|
py
|
Python
|
src/mgls_bootstrapping.py
|
rosich/mgls
|
64c924f59adba2dddf44bb70a84868173f0b7120
|
[
"MIT"
] | null | null | null |
src/mgls_bootstrapping.py
|
rosich/mgls
|
64c924f59adba2dddf44bb70a84868173f0b7120
|
[
"MIT"
] | null | null | null |
src/mgls_bootstrapping.py
|
rosich/mgls
|
64c924f59adba2dddf44bb70a84868173f0b7120
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
from math import sin, cos, tan, atan, pi, acos, sqrt, exp, log10
import sys, os
import copy
import random
import numpy as np
import multiprocessing as mp
import ConfigParser
sys.path.append('./bin')
import mGLS, mMGLS
sys.path.append('./src')
from EnvGlobals import Globals
import mgls_io
import mgls_mc
from mgls_lib import *
#definitions and constants
to_radians = pi/180.0
to_deg = 1.0/to_radians
#-------------------------
def _gls_instance_Ndim_bootstrapping(n_runs):
"""executes n_runs instances of MGLS for with previous data shuffle
"""
cpu_periodogram = list()
for iter in range(n_runs):
"""
#shuffle RV's and their errors. Repetition is not allowed
comb_rv_err = zip(Globals.rv, Globals.rv_err)
random.shuffle(comb_rv_err)
Globals.rv[:], Globals.rv_err[:] = zip(*comb_rv_err)
"""
#allowing repetition
rv = [0.0]*len(Globals.time)
rv_err = [0.0]*len(Globals.time)
for i in range(len(Globals.time)):
index = int(random.uniform(0,len(Globals.time)))
rv[i] = Globals.rv[index]
rv_err[i] = Globals.rv_err[index]
Globals.rv = rv
Globals.rv_err = rv_err
opt_state = mgls_mc.optimal(Globals.ndim, msgs = False, temp_steps=20, n_iter=1000)
pwr_opt, fitting_coeffs, A = mgls(opt_state)
cpu_periodogram.append(pwr_opt) #save the best period determination (highest power)
return cpu_periodogram
def fap(bootstrapping_stats, pwr):
"""returns FAP for a given pwr. i.e. how many realizations overcome
a given power, over unit.
"""
return float(sum(i > pwr for i in bootstrapping_stats))/len(bootstrapping_stats)
def fap_levels(bootstrapping_stats):
"""determines which power a FAP of 1, 0.1, 0.01 % is reached
"""
FAPs = [1.0, 0.1, 0.01, 0.001] #FAPS to compute in %
n_bs = len(bootstrapping_stats)
#sort bootstrapping_stats vector ascendently
sorted_pwr = sorted(bootstrapping_stats)
return [np.percentile(sorted_pwr,100-FAPs[i]) for i in range(len(FAPs))]
def parallel_Mdim_bootstrapping(n_bootstrapping):
"""
"""
n_runs = [n_bootstrapping/Globals.ncpus for i in range(Globals.ncpus)]
pool = mp.Pool(Globals.ncpus) #ncpus available
#run parallell execution
try:
out = pool.map_async(_gls_instance_Ndim_bootstrapping, n_runs).get(1./.0001)
pool.terminate()
except KeyboardInterrupt:
pool.terminate()
sys.exit()
"""
except ZeroDivisionError:
print "Error: Zero division error. Restarted parallel bootstapping"
"""
#join the output bunches
out_spectra = list()
for cpu in range(len(n_runs)):
out_spectra.extend(out[cpu])
bootstrapping_stats = list()
for j in range(len(out_spectra)):
bootstrapping_stats.append(out_spectra[j])
return bootstrapping_stats
def parallel_bootstrapping(n_bootstrapping):
"""
"""
n_runs = [n_bootstrapping/Globals.ncpus for i in range(Globals.ncpus)]
pool = mp.Pool(Globals.ncpus) #ncpus available
#run parallell execution
try:
out = pool.map_async(_gls_instance_bootstrapping, n_runs).get(1./.00001)
pool.terminate()
except KeyboardInterrupt:
pool.terminate()
sys.exit()
#join the output bunches
out_spectra = list()
for cpu in range(len(n_runs)):
out_spectra.extend(out[cpu])
bootstrapping_stats = list()
for j in range(len(out_spectra)):
bootstrapping_stats.append(out_spectra[j])
return bootstrapping_stats
def Mdim_bootstrapping(max_pow):
"""
"""
#n_bootstrapping = 500 #iterations
bootstrapping_stats = parallel_Mdim_bootstrapping(Globals.n_bootstrapping)
print "\n//BOOTSTRAPPING:// {1.0, 0.1, 0.01, 0.001}%"
print "FAP Levels:", fap_levels(bootstrapping_stats)
print "Total bootstapping samples: ", len(bootstrapping_stats)
return bootstrapping_stats
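A small self-contained check of the FAP helpers above, using synthetic powers in place of real MGLS bootstrap runs:
import random
random.seed(0)
stats = [random.random() for _ in range(10000)]  # stand-in periodogram maxima
print(fap(stats, 0.9))       # ~0.1: fraction of realizations with power > 0.9
print(fap_levels(stats))     # powers at which FAP = 1, 0.1, 0.01, 0.001 %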
| 31.690476
| 91
| 0.672176
| 543
| 3,993
| 4.767956
| 0.313076
| 0.118192
| 0.023175
| 0.016995
| 0.4017
| 0.344921
| 0.323677
| 0.323677
| 0.27192
| 0.27192
| 0
| 0.020395
| 0.214125
| 3,993
| 125
| 92
| 31.944
| 0.804653
| 0.088405
| 0
| 0.381579
| 0
| 0.013158
| 0.031302
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.157895
| null | null | 0.039474
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0ac18453ebf1417fb6591ada4674116fa981b20f
| 402
|
py
|
Python
|
biserici_inlemnite/app/migrations/0096_bisericapage_datare_an.py
|
ck-tm/biserici-inlemnite
|
c9d12127b92f25d3ab2fcc7b4c386419fe308a4e
|
[
"MIT"
] | null | null | null |
biserici_inlemnite/app/migrations/0096_bisericapage_datare_an.py
|
ck-tm/biserici-inlemnite
|
c9d12127b92f25d3ab2fcc7b4c386419fe308a4e
|
[
"MIT"
] | null | null | null |
biserici_inlemnite/app/migrations/0096_bisericapage_datare_an.py
|
ck-tm/biserici-inlemnite
|
c9d12127b92f25d3ab2fcc7b4c386419fe308a4e
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.13 on 2021-10-29 11:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0095_bisericapage_utitle'),
]
operations = [
migrations.AddField(
model_name='bisericapage',
name='datare_an',
field=models.IntegerField(blank=True, null=True),
),
]
| 21.157895
| 61
| 0.606965
| 43
| 402
| 5.581395
| 0.813953
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068966
| 0.278607
| 402
| 18
| 62
| 22.333333
| 0.758621
| 0.114428
| 0
| 0
| 1
| 0
| 0.135593
| 0.067797
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0ac20eefa93e74fa6f679df0410321e3088f3827
| 664
|
py
|
Python
|
services/core-api/app/api/mms_now_submissions/models/surface_bulk_sample_activity.py
|
bcgov/mds
|
6c427a66a5edb4196222607291adef8fd6677038
|
[
"Apache-2.0"
] | 25
|
2018-07-09T19:04:37.000Z
|
2022-03-15T17:27:10.000Z
|
services/core-api/app/api/mms_now_submissions/models/surface_bulk_sample_activity.py
|
areyeslo/mds
|
e8c38e593e09b78e2a57009c0d003d6c4bfa32e6
|
[
"Apache-2.0"
] | 983
|
2018-04-25T20:08:07.000Z
|
2022-03-31T21:45:20.000Z
|
services/core-api/app/api/mms_now_submissions/models/surface_bulk_sample_activity.py
|
areyeslo/mds
|
e8c38e593e09b78e2a57009c0d003d6c4bfa32e6
|
[
"Apache-2.0"
] | 58
|
2018-05-15T22:35:50.000Z
|
2021-11-29T19:40:52.000Z
|
from app.api.utils.models_mixins import Base
from app.extensions import db
class MMSSurfaceBulkSampleActivity(Base):
__tablename__ = "surface_bulk_sample_activity"
__table_args__ = {"schema": "mms_now_submissions"}
id = db.Column(db.Integer, primary_key=True)
messageid = db.Column(db.Integer, db.ForeignKey('mms_now_submissions.application.messageid'))
mms_cid = db.Column(db.Integer)
type = db.Column(db.String)
disturbedarea = db.Column(db.Numeric(14, 2))
timbervolume = db.Column(db.Numeric(14, 2))
quantity = db.Column(db.Integer)
def __repr__(self):
return '<MMSSurfaceBulkSampleActivity %r>' % self.id
| 36.888889
| 97
| 0.724398
| 85
| 664
| 5.388235
| 0.541176
| 0.122271
| 0.152838
| 0.148472
| 0.087336
| 0.087336
| 0
| 0
| 0
| 0
| 0
| 0.010714
| 0.156627
| 664
| 17
| 98
| 39.058824
| 0.807143
| 0
| 0
| 0
| 0
| 0
| 0.191265
| 0.14759
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.142857
| 0.071429
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|
0ac3e100821a287c22e2857e9d532f5d8e059c8b
| 2,723
|
py
|
Python
|
src/trusted/validator_arm/dgen_output.py
|
kapkic/native_client
|
51c8bc8c249d55606232ae011bdfc8b4cab3d794
|
[
"BSD-3-Clause"
] | 1
|
2021-12-23T00:36:43.000Z
|
2021-12-23T00:36:43.000Z
|
src/trusted/validator_arm/dgen_output.py
|
kapkic/native_client
|
51c8bc8c249d55606232ae011bdfc8b4cab3d794
|
[
"BSD-3-Clause"
] | null | null | null |
src/trusted/validator_arm/dgen_output.py
|
kapkic/native_client
|
51c8bc8c249d55606232ae011bdfc8b4cab3d794
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python2
#
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""
Some common boilerplates and helper functions for source code generation
in files dgen_test_output.py and dgen_decode_output.py.
"""
HEADER_BOILERPLATE ="""/*
* Copyright 2013 The Native Client Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can
* be found in the LICENSE file.
*/
// DO NOT EDIT: GENERATED CODE
"""
NOT_TCB_BOILERPLATE="""#ifndef NACL_TRUSTED_BUT_NOT_TCB
#error This file is not meant for use in the TCB
#endif
"""
NEWLINE_STR="""
"""
COMMENTED_NEWLINE_STR="""
//"""
"""Adds comment '// ' string after newlines."""
def commented_string(str, indent=''):
sep = NEWLINE_STR + indent + '//'
str = str.replace(NEWLINE_STR, sep)
# This second line is a hack to fix that sometimes newlines are
# represented as '\n'.
# TODO(karl) Find the cause of this hack, and fix it.
return str.replace('\\n', sep)
def ifdef_name(filename):
""" Generates the ifdef name to use for the given filename"""
return filename.replace("/", "_").replace(".", "_").upper() + "_"
def GetNumberCodeBlocks(separators):
"""Gets the number of code blocks to break classes into."""
num_blocks = len(separators) + 1
assert num_blocks >= 2
return num_blocks
def FindBlockIndex(filename, format, num_blocks):
"""Returns true if the filename matches the format with an
index in the range [1, num_blocks]."""
for block in range(1, num_blocks+1):
suffix = format % block
if filename.endswith(suffix):
return block
raise Exception("Can't find block index: %s" % filename)
def GetDecodersBlock(n, separators, decoders, name_fcn):
"""Returns the (sorted) list of decoders to include
in block n, assuming decoders are split using
the list of separators."""
num_blocks = GetNumberCodeBlocks(separators)
assert n > 0 and n <= num_blocks
return [decoder for decoder in decoders
if ((n == 1
or IsPrefixLeDecoder(separators[n-2], decoder, name_fcn)) and
(n == num_blocks or
not IsPrefixLeDecoder(separators[n-1], decoder, name_fcn)))]
def IsPrefixLeDecoder(prefix, decoder, name_fcn):
"""Returns true if the prefix is less than or equal to the
corresponding prefix length of the decoder name."""
decoder_name = name_fcn(decoder)
prefix_len = len(prefix)
decoder_len = len(decoder_name)
decoder_prefix = (decoder_name[0:prefix_len]
if prefix_len < decoder_len
else decoder_name)
return prefix <= decoder_prefix
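A small illustration of the block-splitting helpers above, using hypothetical decoder names and an identity name function:
decoders = ['Add', 'Branch', 'Load', 'Store', 'Vector']
separators = ['L']  # one separator -> two blocks, split at names >= 'L'
num_blocks = GetNumberCodeBlocks(separators)                   # 2
print(GetDecodersBlock(1, separators, decoders, lambda d: d))  # ['Add', 'Branch']
print(GetDecodersBlock(2, separators, decoders, lambda d: d))  # ['Load', 'Store', 'Vector']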
| 31.298851
| 76
| 0.693353
| 386
| 2,723
| 4.772021
| 0.373057
| 0.043974
| 0.022801
| 0.023887
| 0.12595
| 0.12595
| 0.12595
| 0.12595
| 0.12595
| 0.12595
| 0
| 0.008756
| 0.203085
| 2,723
| 86
| 77
| 31.662791
| 0.840092
| 0.319868
| 0
| 0.06383
| 0
| 0
| 0.194604
| 0.013777
| 0
| 0
| 0
| 0.011628
| 0.042553
| 1
| 0.12766
| false
| 0
| 0
| 0
| 0.255319
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0ac42e49c824529d0aa71dbe888c2a691322545e
| 2,527
|
py
|
Python
|
ui_splash_screen.py
|
hirokiyaginuma/scriptspinner-software
|
87185f237f76feeee33a2b74a4d05be088bde011
|
[
"Unlicense"
] | null | null | null |
ui_splash_screen.py
|
hirokiyaginuma/scriptspinner-software
|
87185f237f76feeee33a2b74a4d05be088bde011
|
[
"Unlicense"
] | null | null | null |
ui_splash_screen.py
|
hirokiyaginuma/scriptspinner-software
|
87185f237f76feeee33a2b74a4d05be088bde011
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'splash_screen.ui'
##
## Created by: Qt User Interface Compiler version 5.15.1
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
class Ui_Splash_Screen(object):
def setupUi(self, Splash_Screen):
if not Splash_Screen.objectName():
Splash_Screen.setObjectName(u"Splash_Screen")
Splash_Screen.resize(720, 425)
self.centralwidget = QWidget(Splash_Screen)
self.centralwidget.setObjectName(u"centralwidget")
self.verticalLayout = QVBoxLayout(self.centralwidget)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName(u"verticalLayout")
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.frame = QFrame(self.centralwidget)
self.frame.setObjectName(u"frame")
self.frame.setFrameShape(QFrame.StyledPanel)
self.frame.setFrameShadow(QFrame.Raised)
self.frame.setLineWidth(0)
self.label = QLabel(self.frame)
self.label.setObjectName(u"label")
self.label.setGeometry(QRect(0, 0, 720, 425))
self.label.setLineWidth(0)
self.label.setPixmap(QPixmap(u"img/SS_logo.jpg"))
self.label.setIndent(0)
self.progressBar = QProgressBar(self.frame)
self.progressBar.setObjectName(u"progressBar")
self.progressBar.setGeometry(QRect(70, 330, 591, 41))
self.progressBar.setStyleSheet(u"QProgressBar {\n"
" background-color:rgb(149, 165, 166);\n"
" border-style: none;\n"
" border-radius: 10px;\n"
" text-align: center;\n"
"}\n"
"QProgressBar::chunk {\n"
" border-radius: 10px;\n"
" background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0 rgba(210, 157, 255, 255), stop:1 rgba(156, 69, 255, 255));\n"
"}")
self.progressBar.setValue(24)
self.verticalLayout.addWidget(self.frame)
Splash_Screen.setCentralWidget(self.centralwidget)
self.retranslateUi(Splash_Screen)
QMetaObject.connectSlotsByName(Splash_Screen)
# setupUi
def retranslateUi(self, Splash_Screen):
Splash_Screen.setWindowTitle(QCoreApplication.translate("Splash_Screen", u"MainWindow", None))
self.label.setText("")
# retranslateUi
| 37.716418
| 140
| 0.646617
| 285
| 2,527
| 5.677193
| 0.435088
| 0.103832
| 0.038937
| 0.029666
| 0.02225
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042085
| 0.172537
| 2,527
| 66
| 141
| 38.287879
| 0.731707
| 0.091017
| 0
| 0.043478
| 1
| 0.021739
| 0.192743
| 0.024505
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.065217
| 0
| 0.130435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0ac98e5cdb6676a542021f48c116aa5fa733e705
| 16,208
|
py
|
Python
|
convoy/crypto.py
|
hebinhuang/batch-shipyard
|
f87d94850380bee273eb51c5c35381952a5722b8
|
[
"MIT"
] | null | null | null |
convoy/crypto.py
|
hebinhuang/batch-shipyard
|
f87d94850380bee273eb51c5c35381952a5722b8
|
[
"MIT"
] | null | null | null |
convoy/crypto.py
|
hebinhuang/batch-shipyard
|
f87d94850380bee273eb51c5c35381952a5722b8
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation
#
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# compat imports
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from builtins import ( # noqa
bytes, dict, int, list, object, range, str, ascii, chr, hex, input,
next, oct, open, pow, round, super, filter, map, zip)
# stdlib imports
import base64
import collections
import getpass
import logging
import os
try:
import pathlib2 as pathlib
except ImportError:
import pathlib
import tempfile
import stat
import subprocess
# local imports
from . import settings
from . import util
# create logger
logger = logging.getLogger(__name__)
util.setup_logger(logger)
# global defines
_SSH_KEY_PREFIX = 'id_rsa_shipyard'
_REMOTEFS_SSH_KEY_PREFIX = '{}_remotefs'.format(_SSH_KEY_PREFIX)
# named tuples
PfxSettings = collections.namedtuple(
'PfxSettings', ['filename', 'passphrase', 'sha1'])
def get_ssh_key_prefix():
# type: (None) -> str
"""Get SSH key prefix
:rtype: str
:return: ssh key prefix
"""
return _SSH_KEY_PREFIX
def get_remotefs_ssh_key_prefix():
# type: (None) -> str
"""Get remote fs SSH key prefix
:rtype: str
:return: ssh key prefix for remote fs
"""
return _REMOTEFS_SSH_KEY_PREFIX
def generate_rdp_password():
# type: (None) -> str
"""Generate an RDP password
:rtype: str
:return: rdp password
"""
return base64.b64encode(os.urandom(8))
def generate_ssh_keypair(export_path, prefix=None):
# type: (str, str) -> tuple
"""Generate an ssh keypair for use with user logins
:param str export_path: keypair export path
:param str prefix: key prefix
:rtype: tuple
:return: (private key filename, public key filename)
"""
if util.is_none_or_empty(prefix):
prefix = _SSH_KEY_PREFIX
privkey = pathlib.Path(export_path, prefix)
pubkey = pathlib.Path(export_path, prefix + '.pub')
if privkey.exists():
old = pathlib.Path(export_path, prefix + '.old')
if old.exists():
old.unlink()
privkey.rename(old)
if pubkey.exists():
old = pathlib.Path(export_path, prefix + '.pub.old')
if old.exists():
old.unlink()
pubkey.rename(old)
logger.info('generating ssh key pair to path: {}'.format(export_path))
subprocess.check_call(
['ssh-keygen', '-f', str(privkey), '-t', 'rsa', '-N', ''])
return (privkey, pubkey)
def check_ssh_private_key_filemode(ssh_private_key):
# type: (pathlib.Path) -> bool
"""Check SSH private key filemode
:param pathlib.Path ssh_private_key: SSH private key
:rtype: bool
:return: private key filemode is ok
"""
def _mode_check(fstat, flag):
return bool(fstat & flag)
if util.on_windows():
return True
fstat = ssh_private_key.stat().st_mode
modes = frozenset((stat.S_IRWXG, stat.S_IRWXO))
return not any([_mode_check(fstat, x) for x in modes])
def connect_or_exec_ssh_command(
remote_ip, remote_port, ssh_private_key, username, sync=True,
shell=False, tty=False, ssh_args=None, command=None):
# type: (str, int, pathlib.Path, str, bool, bool, tuple, tuple) -> bool
"""Connect to node via SSH or execute SSH command
:param str remote_ip: remote ip address
:param int remote_port: remote port
:param pathlib.Path ssh_private_key: SSH private key
:param str username: username
:param bool sync: synchronous execution
:param bool shell: execute with shell
:param bool tty: allocate pseudo-tty
:param tuple ssh_args: ssh args
:param tuple command: command
:rtype: int or subprocess.Process
:return: return code or subprocess handle
"""
if not ssh_private_key.exists():
raise RuntimeError('SSH private key file not found at: {}'.format(
ssh_private_key))
# ensure file mode is set properly for the private key
if not check_ssh_private_key_filemode(ssh_private_key):
logger.warning(
'SSH private key filemode is too permissive: {}'.format(
ssh_private_key))
# execute SSH command
ssh_cmd = [
'ssh', '-o', 'StrictHostKeyChecking=no',
'-o', 'UserKnownHostsFile={}'.format(os.devnull),
'-i', str(ssh_private_key), '-p', str(remote_port),
]
if tty:
ssh_cmd.append('-t')
if util.is_not_empty(ssh_args):
ssh_cmd.extend(ssh_args)
ssh_cmd.append('{}@{}'.format(username, remote_ip))
if util.is_not_empty(command):
ssh_cmd.extend(command)
logger.info('{} node {}:{} with key {}'.format(
'connecting to' if util.is_none_or_empty(command)
else 'executing command on', remote_ip, remote_port, ssh_private_key))
if sync:
return util.subprocess_with_output(ssh_cmd, shell=shell)
else:
return util.subprocess_nowait_pipe_stdout(
ssh_cmd, shell=shell, pipe_stderr=True)
def derive_private_key_pem_from_pfx(pfxfile, passphrase=None, pemfile=None):
# type: (str, str, str) -> str
"""Derive a private key pem file from a pfx
:param str pfxfile: pfx file
:param str passphrase: passphrase for pfx
:param str pemfile: path of pem file to write to
:rtype: str
:return: path of pem file
"""
if pfxfile is None:
raise ValueError('pfx file is invalid')
if passphrase is None:
passphrase = getpass.getpass('Enter password for PFX: ')
# convert pfx to pem
if pemfile is None:
f = tempfile.NamedTemporaryFile(mode='wb', delete=False)
f.close()
pemfile = f.name
try:
# create pem from pfx
subprocess.check_call(
['openssl', 'pkcs12', '-nodes', '-in', pfxfile, '-out',
pemfile, '-password', 'pass:' + passphrase]
)
except Exception:
fp = pathlib.Path(pemfile)
if fp.exists():
fp.unlink()
pemfile = None
return pemfile
def derive_public_key_pem_from_pfx(pfxfile, passphrase=None, pemfile=None):
# type: (str, str, str) -> str
"""Derive a public key pem file from a pfx
:param str pfxfile: pfx file
:param str passphrase: passphrase for pfx
:param str pemfile: path of pem file to write to
:rtype: str
:return: path of pem file
"""
if pfxfile is None:
raise ValueError('pfx file is invalid')
if passphrase is None:
passphrase = getpass.getpass('Enter password for PFX: ')
# convert pfx to pem
if pemfile is None:
f = tempfile.NamedTemporaryFile(mode='wb', delete=False)
f.close()
pemfile = f.name
try:
# create pem from pfx
subprocess.check_call(
['openssl', 'pkcs12', '-nodes', '-in', pfxfile, '-out',
pemfile, '-password', 'pass:' + passphrase]
)
# extract public key from private key
subprocess.check_call(
['openssl', 'rsa', '-in', pemfile, '-pubout', '-outform',
'PEM', '-out', pemfile]
)
except Exception:
fp = pathlib.Path(pemfile)
if fp.exists():
fp.unlink()
pemfile = None
return pemfile
def _parse_sha1_thumbprint_openssl(output):
# type: (str) -> str
"""Get SHA1 thumbprint from buffer
:param str buffer: buffer to parse
:rtype: str
:return: sha1 thumbprint of buffer
"""
# return just thumbprint (without colons) from the above openssl command
# in lowercase. Expected openssl output is in the form:
# SHA1 Fingerprint=<thumbprint>
return ''.join(util.decode_string(
output).strip().split('=')[1].split(':')).lower()
def get_sha1_thumbprint_pfx(pfxfile, passphrase):
# type: (str, str) -> str
"""Get SHA1 thumbprint of PFX
:param str pfxfile: name of the pfx file to export
:param str passphrase: passphrase for pfx
:rtype: str
:return: sha1 thumbprint of pfx
"""
if pfxfile is None:
raise ValueError('pfxfile is invalid')
if passphrase is None:
passphrase = getpass.getpass('Enter password for PFX: ')
# compute sha1 thumbprint of pfx
pfxdump = subprocess.check_output(
['openssl', 'pkcs12', '-in', pfxfile, '-nodes', '-passin',
'pass:' + passphrase]
)
proc = subprocess.Popen(
['openssl', 'x509', '-noout', '-fingerprint'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE
)
return _parse_sha1_thumbprint_openssl(proc.communicate(input=pfxdump)[0])
def get_sha1_thumbprint_pem(pemfile):
# type: (str) -> str
"""Get SHA1 thumbprint of PEM
:param str pemfile: name of the pem file to inspect
:rtype: str
:return: sha1 thumbprint of pem
"""
proc = subprocess.Popen(
['openssl', 'x509', '-noout', '-fingerprint', '-in', pemfile],
stdout=subprocess.PIPE
)
return _parse_sha1_thumbprint_openssl(proc.communicate()[0])
def generate_pem_pfx_certificates(config):
# type: (dict) -> str
"""Generate a pem and a derived pfx file
:param dict config: configuration dict
:rtype: str
:return: sha1 thumbprint of pfx
"""
# gather input
pemfile = settings.batch_shipyard_encryption_public_key_pem(config)
pfxfile = settings.batch_shipyard_encryption_pfx_filename(config)
passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config)
if pemfile is None:
pemfile = util.get_input('Enter public key PEM filename to create: ')
if pfxfile is None:
pfxfile = util.get_input('Enter PFX filename to create: ')
if passphrase is None:
while util.is_none_or_empty(passphrase):
passphrase = getpass.getpass('Enter password for PFX: ')
if len(passphrase) == 0:
print('passphrase cannot be empty')
privatekey = pemfile + '.key'
# generate pem file with private key and no password
f = tempfile.NamedTemporaryFile(mode='wb', delete=False)
f.close()
try:
subprocess.check_call(
['openssl', 'req', '-new', '-nodes', '-x509', '-newkey',
'rsa:2048', '-keyout', privatekey, '-out', f.name, '-days', '730',
'-subj', '/C=US/ST=None/L=None/O=None/CN=BatchShipyard']
)
# extract public key from private key
subprocess.check_call(
['openssl', 'rsa', '-in', privatekey, '-pubout', '-outform',
'PEM', '-out', pemfile]
)
logger.debug('created public key PEM file: {}'.format(pemfile))
# convert pem to pfx for Azure Batch service
subprocess.check_call(
['openssl', 'pkcs12', '-export', '-out', pfxfile, '-inkey',
privatekey, '-in', f.name, '-certfile', f.name,
'-passin', 'pass:', '-passout', 'pass:' + passphrase]
)
logger.debug('created PFX file: {}'.format(pfxfile))
finally:
# remove rsa private key file
fp = pathlib.Path(privatekey)
if fp.exists():
fp.unlink()
# remove temp cert pem
fp = pathlib.Path(f.name)
if fp.exists():
fp.unlink()
# get sha1 thumbprint of pfx
return get_sha1_thumbprint_pfx(pfxfile, passphrase)
def get_encryption_pfx_settings(config):
# type: (dict) -> tuple
"""Get PFX encryption settings from configuration
:param dict config: configuration settings
:rtype: tuple
:return: pfxfile, passphrase, sha1 tp
"""
pfxfile = settings.batch_shipyard_encryption_pfx_filename(config)
pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config)
sha1_cert_tp = settings.batch_shipyard_encryption_pfx_sha1_thumbprint(
config)
# manually get thumbprint of pfx if not exists in config
if util.is_none_or_empty(sha1_cert_tp):
if pfx_passphrase is None:
pfx_passphrase = getpass.getpass('Enter password for PFX: ')
sha1_cert_tp = get_sha1_thumbprint_pfx(pfxfile, pfx_passphrase)
settings.set_batch_shipyard_encryption_pfx_sha1_thumbprint(
config, sha1_cert_tp)
return PfxSettings(
filename=pfxfile, passphrase=pfx_passphrase, sha1=sha1_cert_tp)
def _rsa_encrypt_string(data, config):
# type: (str, dict) -> str
"""RSA encrypt a string
:param str data: clear text data to encrypt
:param dict config: configuration dict
:rtype: str
:return: base64-encoded cipher text
"""
if util.is_none_or_empty(data):
raise ValueError('invalid data to encrypt')
inkey = settings.batch_shipyard_encryption_public_key_pem(config)
derived = False
if inkey is None:
# derive pem from pfx
derived = True
pfxfile = settings.batch_shipyard_encryption_pfx_filename(config)
pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase(
config)
inkey = derive_public_key_pem_from_pfx(pfxfile, pfx_passphrase, None)
try:
if inkey is None:
raise RuntimeError('public encryption key is invalid')
proc = subprocess.Popen(
['openssl', 'rsautl', '-encrypt', '-pubin', '-inkey', inkey],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
ciphertext = util.base64_encode_string(
proc.communicate(input=util.encode_string(data))[0])
if proc.returncode != 0:
raise RuntimeError(
'openssl encryption failed with returncode: {}'.format(
proc.returncode))
return ciphertext
finally:
if derived:
fp = pathlib.Path(inkey)
if fp.exists():
fp.unlink()
def _rsa_decrypt_string_with_pfx(ciphertext, config):
# type: (str, dict) -> str
"""RSA decrypt a string
:param str ciphertext: cipher text in base64
:param dict config: configuration dict
:rtype: str
:return: decrypted cipher text
"""
if util.is_none_or_empty(ciphertext):
raise ValueError('invalid ciphertext to decrypt')
pfxfile = settings.batch_shipyard_encryption_pfx_filename(config)
pfx_passphrase = settings.batch_shipyard_encryption_pfx_passphrase(config)
pemfile = derive_private_key_pem_from_pfx(pfxfile, pfx_passphrase, None)
if pemfile is None:
raise RuntimeError('cannot decrypt without valid private key')
cleartext = None
try:
data = util.base64_decode_string(ciphertext)
proc = subprocess.Popen(
['openssl', 'rsautl', '-decrypt', '-inkey', pemfile],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
cleartext = proc.communicate(input=data)[0]
finally:
fp = pathlib.Path(pemfile)
if fp.exists():
fp.unlink()
return cleartext
def encrypt_string(enabled, string, config):
# type: (bool, str, dict) -> str
"""Encrypt a string
:param bool enabled: if encryption is enabled
:param str string: string to encrypt
:param dict config: configuration dict
:rtype: str
:return: encrypted string if enabled
"""
if enabled:
return _rsa_encrypt_string(string, config)
else:
return string
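A hedged roundtrip sketch of the helpers above; it assumes `config` is a batch-shipyard configuration dict whose PFX settings point at an existing certificate and passphrase:
# encrypt with the public key, then decrypt with the private key from the pfx
ciphertext = encrypt_string(True, 'storage-account-key', config)
cleartext = _rsa_decrypt_string_with_pfx(ciphertext, config)
assert util.decode_string(cleartext) == 'storage-account-key'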
| 35.311547
| 79
| 0.653258
| 2,038
| 16,208
| 5.052012
| 0.185476
| 0.028166
| 0.022727
| 0.03312
| 0.41521
| 0.379856
| 0.32566
| 0.271465
| 0.221445
| 0.197358
| 0
| 0.006029
| 0.24272
| 16,208
| 458
| 80
| 35.388646
| 0.832817
| 0.296088
| 0
| 0.363636
| 0
| 0
| 0.120575
| 0.008142
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064394
| false
| 0.125
| 0.060606
| 0.003788
| 0.200758
| 0.049242
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
0acb3e8369864a2998734321cae251dc26fd05fa
| 2,884
|
py
|
Python
|
extractFeatures.py
|
PatrickJReed/Longboard
|
f6ca4a6e51c91296894aee2e02b86f83b38c080a
|
[
"MIT"
] | 1
|
2020-04-27T19:55:29.000Z
|
2020-04-27T19:55:29.000Z
|
extractFeatures.py
|
PatrickJReed/Longboard2
|
f6ca4a6e51c91296894aee2e02b86f83b38c080a
|
[
"MIT"
] | 1
|
2020-02-26T18:06:09.000Z
|
2020-02-26T18:06:09.000Z
|
extractFeatures.py
|
PatrickJReed/Longboard
|
f6ca4a6e51c91296894aee2e02b86f83b38c080a
|
[
"MIT"
] | null | null | null |
#!/home/ubuntu/miniconda2/bin/python
from __future__ import division
import sys
import glob, os, gc
import uuid
import os.path
import csv
import numpy as np
from time import time
from subprocess import (call, Popen, PIPE)
from itertools import product
import shutil
import re
import pickle
from boto3.session import Session
import boto3
import h5py
import umap
import hdbscan
from keras.models import load_model
from keras.models import Model
from keras import backend as K
from keras.utils import multi_gpu_model
##Path to Data
basepath = "/home/ubuntu/"
subject = sys.argv[1]
with open("config.txt") as f:
config = [line.rstrip() for line in f]
print(config[0])
print(config[1])
session = Session(aws_access_key_id=config[0],aws_secret_access_key=config[1])
s3 = session.resource('s3')
s3 = boto3.client ('s3')
s3.download_file('for-ndar',os.path.join("metadata/", subject + ".txt"),os.path.join(basepath,subject + ".txt"))
with open(subject + ".txt") as f:
Cells = [line.rstrip() for line in f]
session = Session(aws_access_key_id=config[0],aws_secret_access_key=config[1])
s3 = session.resource('s3')
s3.meta.client.download_file('bsmn-data',os.path.join('Inception_Transfer_Model.h5'),os.path.join(basepath,'Inception_Transfer_Model.h5'))
feat_extractor = load_model(os.path.join(basepath,'Inception_Transfer_Model.h5'))
parallel_model = multi_gpu_model(feat_extractor, gpus=2)
count = 0
for cell in Cells:
print(cell)
cell_size=0
cell_ids = []
s3.meta.client.download_file('bsmn-data',os.path.join(subject, cell+'_IDs.h5'),os.path.join(basepath,cell+'_IDs.h5'))
f = h5py.File(os.path.join(basepath,cell+'_IDs.h5'), 'r')
cell_ids = f['ID']
for cid in cell_ids:
cid = cid.decode('utf-8')
s3.meta.client.download_file('bsmn-data',os.path.join(subject, cell+'_'+cid+'.h5'),os.path.join(basepath,cell+'_'+cid+'.h5'))
xyz = h5py.File(os.path.join(basepath,cell+'_'+cid+'.h5'), 'r')
os.remove(os.path.join(basepath,cell+'_'+cid+'.h5'))
if count == 0:
X = xyz['X']
Y = xyz['Y']
Z = parallel_model.predict(X, batch_size = 128)
count+=1
length = len(Y)
U = [cid] * length
else:
X = xyz['X']
Y = np.append(Y,xyz['Y'], axis=0)
z = feat_extractor.predict(X, batch_size = 128)
Z = np.append(Z,z, axis=0)
length = len(xyz['Y'])
U = U + ([cid] * length)
print(Z.shape)
hf = h5py.File(subject+'_ef.h5', 'w')
hf.create_dataset('Y', data=Y)
hf.create_dataset('Z', data=Z)
hf.close()
session = Session(aws_access_key_id=config[0],aws_secret_access_key=config[1])
s3 = session.resource('s3')
s3.meta.client.upload_file(os.path.join(subject+'_ef.h5'),'bsmn-data',os.path.join(subject, subject+'_ef.h5'))
call(['sudo', 'shutdown', '-h', 'now'])
| 31.692308
| 138
| 0.662968
| 452
| 2,884
| 4.09292
| 0.265487
| 0.048649
| 0.075676
| 0.077838
| 0.402703
| 0.38
| 0.342703
| 0.261081
| 0.215676
| 0.215676
| 0
| 0.02397
| 0.175451
| 2,884
| 91
| 139
| 31.692308
| 0.753995
| 0.016297
| 0
| 0.106667
| 0
| 0
| 0.093827
| 0.028571
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.293333
| null | null | 0.053333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0acf1290742f590cb6015abc57d74458d907cabb
| 1,164
|
py
|
Python
|
soil/build/lib/soil/openstack/snapshot.py
|
JackDan9/soil
|
ae612a4634634aace834491fbdefbc69e6167674
|
[
"MIT"
] | 1
|
2020-08-06T11:58:35.000Z
|
2020-08-06T11:58:35.000Z
|
soil/build/lib/soil/openstack/snapshot.py
|
JackDan9/soil
|
ae612a4634634aace834491fbdefbc69e6167674
|
[
"MIT"
] | 4
|
2019-12-13T11:27:28.000Z
|
2022-02-27T11:58:38.000Z
|
soil/build/lib/soil/openstack/snapshot.py
|
JackDan9/soil
|
ae612a4634634aace834491fbdefbc69e6167674
|
[
"MIT"
] | null | null | null |
# Copyright 2020 Soil, Inc.
from soil.openstack.base import DataBase
from soil.openstack.base import SourceBase
class SnapshotData(DataBase):
"""A class for openstack snapshot data"""
def __init__(self, data):
self.data = data['snapshot']
class Snapshot(SourceBase):
"""A class for openstack snapshot"""
def __init__(self, plugin, source_id):
super(Snapshot, self).__init__(plugin, source_id)
self._snapshot_obj = None
@property
def snapshot_obj(self):
if self._snapshot_obj is not None:
return self._snapshot_obj
self._snapshot_obj = SnapshotData(self.show())
return self._snapshot_obj
def show(self):
return self.plugin.cinder.show_snapshot(self.source_id)
def delete(self):
self.plugin.cinder.delete_snapshot(self.source_id)
def is_created(self):
snapshot_info = self.show()
status = snapshot_info['snapshot']['status']
if status in ('available', ):
return True
self._check_failed_status(status)
return False
def is_delete(self):
pass
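A hypothetical usage sketch of the wrapper above; `plugin` is assumed to expose a configured cinder client, and the snapshot id is a placeholder:
# a single status check of an existing snapshot
snap = Snapshot(plugin, '7a3f9c2e-1111-4c21-9d5a-1b2c3d4e5f60')  # hypothetical id
if snap.is_created():
    print(snap.snapshot_obj.data['status'])  # 'available'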
| 25.304348
| 63
| 0.636598
| 138
| 1,164
| 5.108696
| 0.311594
| 0.102128
| 0.106383
| 0.059574
| 0.215603
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0047
| 0.2689
| 1,164
| 45
| 64
| 25.866667
| 0.823737
| 0.079897
| 0
| 0.071429
| 0
| 0
| 0.029245
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.035714
| 0.071429
| 0.035714
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0acf5c8efa495629dab15411d7c1138e6f73ca8f
| 1,417
|
py
|
Python
|
data_structures/queue/queue_on_pseudo_stack.py
|
hank-chou/python
|
a9f729fa263bce599d2774f3f6afb5a18bcc9862
|
[
"MIT"
] | 13
|
2021-03-11T00:25:22.000Z
|
2022-03-19T00:19:23.000Z
|
data_structures/queue/queue_on_pseudo_stack.py
|
hank-chou/python
|
a9f729fa263bce599d2774f3f6afb5a18bcc9862
|
[
"MIT"
] | 162
|
2021-03-09T01:52:11.000Z
|
2022-03-12T01:09:07.000Z
|
data_structures/queue/queue_on_pseudo_stack.py
|
hank-chou/python
|
a9f729fa263bce599d2774f3f6afb5a18bcc9862
|
[
"MIT"
] | 18
|
2020-02-09T13:00:11.000Z
|
2021-03-11T08:47:36.000Z
|
"""Queue represented by a pseudo stack (represented by a list with pop and append)"""
class Queue:
def __init__(self):
self.stack = []
self.length = 0
def __str__(self):
printed = "<" + str(self.stack)[1:-1] + ">"
return printed
"""Enqueues {@code item}
@param item
item to enqueue"""
def put(self, item):
self.stack.append(item)
self.length = self.length + 1
"""Dequeues {@code item}
@requirement: |self.length| > 0
@return dequeued
item that was dequeued"""
def get(self):
self.rotate(1)
dequeued = self.stack[self.length - 1]
self.stack = self.stack[:-1]
self.rotate(self.length - 1)
self.length = self.length - 1
return dequeued
"""Rotates the queue {@code rotation} times
@param rotation
number of times to rotate queue"""
def rotate(self, rotation):
for i in range(rotation):
temp = self.stack[0]
self.stack = self.stack[1:]
self.put(temp)
self.length = self.length - 1
"""Reports item at the front of self
@return item at front of self.stack"""
def front(self):
front = self.get()
self.put(front)
self.rotate(self.length - 1)
return front
"""Returns the length of this.stack"""
def size(self):
return self.length
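A quick demonstration of FIFO behavior despite the stack-style storage:
q = Queue()
for item in (1, 2, 3):
    q.put(item)
print(q)          # <1, 2, 3>
print(q.get())    # 1: dequeued in insertion order
print(q.front())  # 2: peeked without removal
print(q.size())   # 2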
| 24.431034
| 85
| 0.562456
| 180
| 1,417
| 4.383333
| 0.288889
| 0.152091
| 0.08365
| 0.076046
| 0.186312
| 0.058302
| 0
| 0
| 0
| 0
| 0
| 0.014463
| 0.316867
| 1,417
| 57
| 86
| 24.859649
| 0.80062
| 0.055752
| 0
| 0.133333
| 0
| 0
| 0.002158
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.233333
| false
| 0
| 0
| 0.033333
| 0.4
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0ad16ca68b13c3255bfd62c00d84e6b8aa940002
| 3,021
|
py
|
Python
|
finex_history.py
|
yihming/gdax-data
|
7e562f314e9ef12eb6be2df3b97190af632c4530
|
[
"MIT"
] | null | null | null |
finex_history.py
|
yihming/gdax-data
|
7e562f314e9ef12eb6be2df3b97190af632c4530
|
[
"MIT"
] | null | null | null |
finex_history.py
|
yihming/gdax-data
|
7e562f314e9ef12eb6be2df3b97190af632c4530
|
[
"MIT"
] | null | null | null |
import datetime
import calendar
import requests
import pandas as pd
import json
import os.path
import time
import MySQLdb as M
from gdax_history import timestamp_to_utcstr
def connect_to_db():
config = json.load(open('dbconn.json'))["mysql"]
db = M.connect(host = config["host"],
user = config["user"],
passwd = config["password"],
db = config["database"])
return db
def write_to_db(df, db):
print "Write %d entries to database." % df.shape[0]
cur = db.cursor()
try:
for row in df.itertuples():
ts = row.Time / 1000
cur.execute(
"""INSERT INTO finex_history (timestamp, open, close, high, low, volume, utc_datetime)
VALUES (%s, %s, %s, %s, %s, %s, %s)""",
[ts, row.Open, row.Close, row.High, row.Low, row.Volume, timestamp_to_utcstr(ts)])
db.commit()
print "Write successfully!\n"
except (M.Error, M.Warning) as e:
print(e)
db.rollback()
def collect_data(start, end):
starttime = datetime.datetime.strptime(start, '%m/%d/%Y')
endtime = datetime.datetime.strptime(end, '%m/%d/%Y')
start_unixtime = calendar.timegm(starttime.utctimetuple())
end_unixtime = calendar.timegm(endtime.utctimetuple())
track_time = time.time()  # the Bitfinex API rate-limits requests; pause if we run faster than ~10 per minute
count = 0
df = pd.DataFrame(data = [], columns = ['Time', 'Open', 'Close', 'High', 'Low', 'Volume'])
while (start_unixtime < end_unixtime):
cur_end_unixtime = start_unixtime + 60 * 999 #60*60*24*30 #30 days at a time
if (cur_end_unixtime > end_unixtime):
cur_end_unixtime = end_unixtime #if the time is in future.
url = 'https://api.bitfinex.com/v2/candles/trade:1m:tBTCUSD/hist?start={}&end={}&limit=1000'.format(str(start_unixtime) + "000", str(cur_end_unixtime) + "000") # 1m candles; the timeframe token can be changed
response = requests.get(url)
data = response.json()
df_tmp = pd.DataFrame(data)
df_tmp.columns = ['Time', 'Open', 'Close', 'High', 'Low', 'Volume']
#df.set_index('Time')
df = pd.concat([df, df_tmp])
start_unixtime = cur_end_unixtime + 60 #to prevent duplicates
count = count + 1
if (count == 10): #if 10 requests are made
count = 0 #reset it
diff = time.time() - track_time
if (diff <= 60):
print('Sleeping for {} seconds'.format(str(60 - diff)))
time.sleep(60 - diff) #sleep
track_time = time.time()
            # Bitfinex limits us to roughly 10 requests per minute
df = df.sort_values(by = ['Time'])
return df
def main():
db = connect_to_db()
df = collect_data(start = '09/24/2018', end = '09/26/2018')
write_to_db(df, db)
db.close()
if __name__ == "__main__":
main()
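The pagination logic in collect_data is the load-bearing detail: each request covers at most 999 one-minute candles, and each window starts one candle after the previous one to avoid duplicates. A standalone sketch of just that windowing arithmetic (the helper name is illustrative):

def minute_windows(start_unixtime, end_unixtime, candles_per_request=999):
    """Yield (start, end) unix-second pairs covering the range in
    999-minute chunks, stepping 60s past each chunk to avoid overlap."""
    cur = start_unixtime
    while cur < end_unixtime:
        cur_end = min(cur + 60 * candles_per_request, end_unixtime)
        yield cur, cur_end
        cur = cur_end + 60  # skip one candle so windows never share a minute

# Example: one day of minutes splits into a 999-minute and a 440-minute window.
for window in minute_windows(0, 60 * 60 * 24):
    print(window)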
| 30.21
| 207
| 0.581595
| 398
| 3,021
| 4.28392
| 0.386935
| 0.058065
| 0.008798
| 0.009384
| 0.119648
| 0.081525
| 0.03871
| 0
| 0
| 0
| 0
| 0.031496
| 0.285336
| 3,021
| 99
| 208
| 30.515152
| 0.758221
| 0.098974
| 0
| 0.060606
| 0
| 0.015152
| 0.117899
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.015152
| 0.136364
| null | null | 0.060606
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0ad19b186920402498e9734534abe48d50e505b7
| 2,154
|
py
|
Python
|
src/producers/connector.py
|
cvelas31/public_transportation_streaming
|
903a1a147645e1b0783555db4bfc02098f7941ae
|
[
"MIT"
] | null | null | null |
src/producers/connector.py
|
cvelas31/public_transportation_streaming
|
903a1a147645e1b0783555db4bfc02098f7941ae
|
[
"MIT"
] | null | null | null |
src/producers/connector.py
|
cvelas31/public_transportation_streaming
|
903a1a147645e1b0783555db4bfc02098f7941ae
|
[
"MIT"
] | null | null | null |
"""Configures a Kafka Connector for Postgres Station data"""
import json
import logging
import requests
from settings import Settings
logger = logging.getLogger(__name__)
KAFKA_CONNECT_URL = f"{Settings.URLs.KAFKA_CONNECT_URL}/connectors"
CONNECTOR_NAME = "stations"
def configure_connector():
"""Starts and configures the Kafka Connect connector"""
logging.debug("Creating or updating kafka connect connector...")
resp = requests.get(f"{KAFKA_CONNECT_URL}/{CONNECTOR_NAME}")
if resp.status_code == 200:
logging.debug("Connector already created skipping recreation")
return
config = {
"connector.class": "io.confluent.connect.jdbc.JdbcSourceConnector",
"key.converter": "org.apache.kafka.connect.json.JsonConverter",
"key.converter.schemas.enable": "false",
"value.converter": "org.apache.kafka.connect.json.JsonConverter",
"value.converter.schemas.enable": "false",
"topic.prefix": "com.connect.transportation.",
"connection.url": "jdbc:postgresql://postgres:5432/cta",
"connection.user": "cta_admin",
"connection.password": "chicago",
"batch.max.rows": "500",
"table.whitelist": "stations",
"poll.interval.ms": "5000", # Poll every 5 seconds
"mode": "incrementing",
"incrementing.column.name": "stop_id",
}
    # The config above completes the exercise directions: the JDBC Source Connector
    # loads the `stations` table in incrementing mode on `stop_id`, publishes under
    # the `com.connect.transportation.` topic prefix, and polls every 5 seconds
    # rather than continuously.
data = json.dumps({"name": CONNECTOR_NAME, "config": config})
resp = requests.post(
KAFKA_CONNECT_URL,
headers={"Content-Type": "application/json"},
data=data,
)
# Ensure a healthy response was given
resp.raise_for_status()
logging.info("-------Connector created successfully-------")
if __name__ == "__main__":
configure_connector()
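After running the script, the connector can be verified through Kafka Connect's standard status endpoint; a small sketch reusing the same URL convention as above:

def connector_status(base_url=KAFKA_CONNECT_URL, name=CONNECTOR_NAME):
    """Fetch connector and task states from the Kafka Connect REST API."""
    resp = requests.get(f"{base_url}/{name}/status")
    resp.raise_for_status()
    status = resp.json()
    return status["connector"]["state"], [t["state"] for t in status["tasks"]]

# e.g. connector_status() -> ("RUNNING", ["RUNNING"]) once the task has started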
| 35.311475
| 98
| 0.679201
| 248
| 2,154
| 5.774194
| 0.532258
| 0.083799
| 0.041899
| 0.032123
| 0.065642
| 0.065642
| 0.065642
| 0
| 0
| 0
| 0
| 0.008656
| 0.19545
| 2,154
| 60
| 99
| 35.9
| 0.817657
| 0.243733
| 0
| 0
| 0
| 0
| 0.469603
| 0.220223
| 0
| 0
| 0
| 0.016667
| 0
| 1
| 0.025641
| false
| 0.025641
| 0.102564
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0ad4a301cbaa49708e90318cda5d0db992bcc1f1
| 354
|
py
|
Python
|
controllers/albums.py
|
jeonginlee/groove_scheduler
|
84e61834e940e2ff138ffeeea61fd301f3c2a244
|
[
"MIT"
] | null | null | null |
controllers/albums.py
|
jeonginlee/groove_scheduler
|
84e61834e940e2ff138ffeeea61fd301f3c2a244
|
[
"MIT"
] | null | null | null |
controllers/albums.py
|
jeonginlee/groove_scheduler
|
84e61834e940e2ff138ffeeea61fd301f3c2a244
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, render_template
albums = Blueprint('albums', __name__, template_folder='templates')
@albums.route('/albums/edit')
def albums_edit_route():
options = {
"edit": True
}
return render_template("albums.html", **options)
@albums.route('/albums')
def albums_route():
options = {
"edit": False
}
return render_template("albums.html", **options)
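A blueprint only takes effect once it is registered on an application; a minimal sketch of that step (the application module itself is not part of this file, so its layout here is an assumption):

from flask import Flask

app = Flask(__name__)
app.register_blueprint(albums)  # exposes /albums and /albums/edit

if __name__ == '__main__':
    app.run(debug=True)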
| 19.666667
| 67
| 0.700565
| 42
| 354
| 5.666667
| 0.428571
| 0.138655
| 0.142857
| 0.218487
| 0.310924
| 0.310924
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129944
| 354
| 18
| 68
| 19.666667
| 0.772727
| 0
| 0
| 0.285714
| 0
| 0
| 0.180282
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.071429
| 0
| 0.357143
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0ad4ca562029351bba499bd795e4d3faca8ffc96
| 3,113
|
py
|
Python
|
Incident-Response/Tools/dfirtrack/dfirtrack_main/views/division_views.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 1
|
2021-07-24T17:22:50.000Z
|
2021-07-24T17:22:50.000Z
|
Incident-Response/Tools/dfirtrack/dfirtrack_main/views/division_views.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 2
|
2022-02-28T03:40:31.000Z
|
2022-02-28T03:40:52.000Z
|
Incident-Response/Tools/dfirtrack/dfirtrack_main/views/division_views.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 2
|
2022-02-25T08:34:51.000Z
|
2022-03-16T17:29:44.000Z
|
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect, render
from django.urls import reverse
from django.views.generic import DetailView, ListView
from django.views.generic.edit import CreateView, UpdateView
from dfirtrack_main.forms import DivisionForm
from dfirtrack_main.logger.default_logger import debug_logger
from dfirtrack_main.models import Division
class DivisionList(LoginRequiredMixin, ListView):
login_url = '/login'
model = Division
template_name = 'dfirtrack_main/division/division_list.html'
context_object_name = 'division_list'
def get_queryset(self):
debug_logger(str(self.request.user), " DIVISION_LIST_ENTERED")
return Division.objects.order_by('division_name')
class DivisionDetail(LoginRequiredMixin, DetailView):
login_url = '/login'
model = Division
template_name = 'dfirtrack_main/division/division_detail.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
division = self.object
division.logger(str(self.request.user), " DIVISION_DETAIL_ENTERED")
return context
class DivisionCreate(LoginRequiredMixin, CreateView):
login_url = '/login'
model = Division
form_class = DivisionForm
template_name = 'dfirtrack_main/division/division_add.html'
def get(self, request, *args, **kwargs):
form = self.form_class()
debug_logger(str(request.user), " DIVISION_ADD_ENTERED")
return render(request, self.template_name, {'form': form})
def post(self, request, *args, **kwargs):
form = self.form_class(request.POST)
if form.is_valid():
division = form.save(commit=False)
division.save()
division.logger(str(request.user), " DIVISION_ADD_EXECUTED")
messages.success(request, 'Division added')
return redirect(reverse('division_detail', args=(division.division_id,)))
else:
return render(request, self.template_name, {'form': form})
class DivisionUpdate(LoginRequiredMixin, UpdateView):
login_url = '/login'
model = Division
form_class = DivisionForm
template_name = 'dfirtrack_main/division/division_edit.html'
def get(self, request, *args, **kwargs):
division = self.get_object()
form = self.form_class(instance=division)
division.logger(str(request.user), " DIVISION_EDIT_ENTERED")
return render(request, self.template_name, {'form': form})
def post(self, request, *args, **kwargs):
division = self.get_object()
form = self.form_class(request.POST, instance=division)
if form.is_valid():
division = form.save(commit=False)
division.save()
division.logger(str(request.user), " DIVISION_EDIT_EXECUTED")
messages.success(request, 'Division edited')
return redirect(reverse('division_detail', args=(division.division_id,)))
else:
return render(request, self.template_name, {'form': form})
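These class-based views are wired up through a URLconf; the real dfirtrack routes are not shown in this file, so the paths below are illustrative only (the view names match the reverse() calls above):

from django.urls import path
from dfirtrack_main.views import division_views

urlpatterns = [
    path('division/', division_views.DivisionList.as_view(), name='division_list'),
    path('division/<int:pk>/', division_views.DivisionDetail.as_view(), name='division_detail'),
    path('division/add/', division_views.DivisionCreate.as_view(), name='division_add'),
    path('division/<int:pk>/edit/', division_views.DivisionUpdate.as_view(), name='division_edit'),
]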
| 40.428571
| 85
| 0.697719
| 356
| 3,113
| 5.924157
| 0.213483
| 0.045519
| 0.054054
| 0.034139
| 0.598388
| 0.562352
| 0.505453
| 0.475107
| 0.449028
| 0.449028
| 0
| 0
| 0.196595
| 3,113
| 76
| 86
| 40.960526
| 0.843263
| 0
| 0
| 0.454545
| 0
| 0
| 0.137488
| 0.088982
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.136364
| 0
| 0.636364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|
0ad6db55250893c680ef209759e33e069cabdd9a
| 4,292
|
py
|
Python
|
modules/stackoverflow/models.py
|
tjsavage/polymer-dashboard
|
19bc467f1206613f8eec646b6f2bc43cc319ef75
|
[
"CNRI-Python",
"Linux-OpenIB"
] | 1
|
2017-04-26T18:51:43.000Z
|
2017-04-26T18:51:43.000Z
|
modules/stackoverflow/models.py
|
tjsavage/polymer-dashboard
|
19bc467f1206613f8eec646b6f2bc43cc319ef75
|
[
"CNRI-Python",
"Linux-OpenIB"
] | null | null | null |
modules/stackoverflow/models.py
|
tjsavage/polymer-dashboard
|
19bc467f1206613f8eec646b6f2bc43cc319ef75
|
[
"CNRI-Python",
"Linux-OpenIB"
] | null | null | null |
import fix_path
import json
import datetime
from google.appengine.ext import ndb
# Taken from http://stackoverflow.com/questions/455580/json-datetime-between-python-and-javascript
dthandler = lambda obj: (
obj.isoformat()
if isinstance(obj, datetime.datetime)
or isinstance(obj, datetime.date)
else None
)
class StackOverflowSnapshot(ndb.Model):
"""Example Model"""
raw_timestamp = ndb.DateTimeProperty(required=True, auto_now_add=True)
requested_time = ndb.DateTimeProperty(required=True)
num_questions_by_tag = ndb.JsonProperty()
num_tagged_questions = ndb.IntegerProperty()
num_answered = ndb.IntegerProperty()
num_unanswered = ndb.IntegerProperty()
total_question_views = ndb.IntegerProperty()
status = ndb.StringProperty()
status_string = ndb.StringProperty()
def as_dict(self):
result = {}
result['requested_time'] = dthandler(self.requested_time)
result['num_tagged_questions'] = self.num_tagged_questions
result['num_questions_by_tag'] = self.num_questions_by_tag
result['num_answered'] = self.num_answered
result['num_unanswered'] = self.num_unanswered
result['total_question_views'] = self.total_question_views
result['status'] = self.status
result['status_string'] = self.status_string
return result
class StackOverflowQuestion(ndb.Model):
first_seen = ndb.DateTimeProperty(required=True, auto_now_add=True)
tags = ndb.StringProperty(repeated=True)
is_answered = ndb.BooleanProperty()
view_count = ndb.IntegerProperty()
answer_count = ndb.IntegerProperty()
url = ndb.StringProperty()
title = ndb.StringProperty()
creation_date = ndb.DateTimeProperty()
question_id = ndb.IntegerProperty()
def as_dict(self):
result = {}
result['first_seen'] = dthandler(self.first_seen)
result['tags'] = [t for t in self.tags]
result['is_answered'] = self.is_answered
result['view_count'] = self.view_count
result['answer_count'] = self.answer_count
result['url'] = self.url
result['title'] = self.title
result['creation_date'] = dthandler(self.creation_date)
result['question_id'] = self.question_id
return result
def update_to_stackexchange_question(self, stackexchange_question):
updated = False
if stackexchange_question.tags != self.tags:
self.tags = stackexchange_question.tags
updated = True
if stackexchange_question.json['is_answered'] != self.is_answered:
self.is_answered = stackexchange_question.json['is_answered']
updated = True
if stackexchange_question.view_count != self.view_count:
self.view_count = stackexchange_question.view_count
updated = True
if stackexchange_question.json['answer_count'] != self.answer_count:
self.answer_count = stackexchange_question.json['answer_count']
updated = True
if stackexchange_question.url != self.url:
self.url = stackexchange_question.url
updated = True
if stackexchange_question.title != self.title:
self.title = stackexchange_question.title
updated = True
if stackexchange_question.creation_date != self.creation_date:
self.creation_date = stackexchange_question.creation_date
updated = True
if stackexchange_question.json['question_id'] != self.question_id:
self.question_id = stackexchange_question.json['question_id']
updated = True
return updated
@classmethod
def from_stackexchange_question(cls, stackexchange_question):
result = cls(
tags = [t for t in stackexchange_question.tags],
is_answered = stackexchange_question.json['is_answered'],
view_count = stackexchange_question.view_count,
answer_count = stackexchange_question.json['answer_count'],
url = stackexchange_question.url,
title = stackexchange_question.title,
creation_date = stackexchange_question.creation_date,
question_id = stackexchange_question.json['question_id']
)
return result
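The as_dict methods exist so these entities serialize cleanly: datetime fields are pre-converted by dthandler, so the result holds only JSON-ready types. A short illustrative helper (not from the repo):

import json

def to_json(entity):
    """Serialize a snapshot or question; as_dict() has already run
    datetime fields through dthandler, so json.dumps needs no default."""
    return json.dumps(entity.as_dict())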
| 40.490566
| 98
| 0.682199
| 467
| 4,292
| 6.012848
| 0.188437
| 0.209402
| 0.080128
| 0.064815
| 0.389601
| 0.259972
| 0.12963
| 0.032051
| 0
| 0
| 0
| 0.001807
| 0.226468
| 4,292
| 106
| 99
| 40.490566
| 0.843976
| 0.025862
| 0
| 0.16129
| 0
| 0
| 0.071839
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043011
| false
| 0
| 0.043011
| 0
| 0.344086
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0add5b092c6c665d2b618a20a05d4cd299d00402
| 1,948
|
py
|
Python
|
src/handler.py
|
MrIgumnov96/ETL-CloudDeployment
|
666b85a9350460fba49f82ec90f5cddc0bdd0235
|
[
"Unlicense"
] | null | null | null |
src/handler.py
|
MrIgumnov96/ETL-CloudDeployment
|
666b85a9350460fba49f82ec90f5cddc0bdd0235
|
[
"Unlicense"
] | null | null | null |
src/handler.py
|
MrIgumnov96/ETL-CloudDeployment
|
666b85a9350460fba49f82ec90f5cddc0bdd0235
|
[
"Unlicense"
] | null | null | null |
import boto3
import src.app as app
import csv
import psycopg2 as ps
import os
from dotenv import load_dotenv
load_dotenv()
dbname = os.environ["db"]
host = os.environ["host"]
port = os.environ["port"]
user = os.environ["user"]
password = os.environ["pass"]
connection = ps.connect(dbname=dbname,
host=host,
port=port,
user=user,
password=password)
def handle(event, context):
cursor = connection.cursor()
cursor.execute("SELECT 1", ())
print(cursor.fetchall())
    # Get key and bucket information from the S3 event
key = event['Records'][0]['s3']['object']['key']
bucket = event['Records'][0]['s3']['bucket']['name']
# use boto3 library to get object from S3
s3 = boto3.client('s3')
s3_object = s3.get_object(Bucket = bucket, Key = key)
data = s3_object['Body'].read().decode('utf-8')
all_lines = []
# read CSV
# csv_data = csv.reader(data.splitlines())
# for row in csv_data:
# datestr = row[0] #.replace('/', '-')
# # print(datestr)
# date_obj = datetime.strptime(datestr, '%d/%m/%Y %H:%M')
# # print(date_obj)
# # time = str(row[0][-5:])
# location = str(row[1])
# order = str(row[3])
# total = str(row[4])
# all_lines.append({'date':date_obj, 'location':location, 'order':order, 'total':total})
# return cached_list
# print(all_lines)
app.start_app(all_lines, data)
    for line in all_lines:
        print(line)
return {"message": "success!!! Check the cloud watch logs for this lambda in cloudwatch https://eu-west-1.console.aws.amazon.com/cloudwatch/home?region=eu-west-1#logsV2:log-groups"}
# Form all the lines of data into a list of lists
# all_lines = [line for line in csv_data]
# print(data)
# print(all_lines)
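The handler only ever reads event['Records'][0]['s3'][...], so a local smoke test needs nothing more than that shape; a minimal sketch with invented bucket and key values:

fake_event = {
    "Records": [
        {"s3": {"bucket": {"name": "example-bucket"},
                "object": {"key": "orders/2021-01-01.csv"}}}
    ]
}
# handle(fake_event, context=None)  # still needs live S3/DB access to run fully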
| 31.419355
| 185
| 0.587269
| 259
| 1,948
| 4.324324
| 0.42471
| 0.064286
| 0.046429
| 0.026786
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017446
| 0.264374
| 1,948
| 62
| 186
| 31.419355
| 0.764131
| 0.325975
| 0
| 0
| 0
| 0.032258
| 0.186191
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0.064516
| 0.193548
| 0
| 0.258065
| 0.096774
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
0ae19706ac78f27bbbf84e3668bc38423a4a2fcd
| 739
|
py
|
Python
|
feaas/runners/__init__.py
|
tsuru/varnishapi
|
d63a8c8c5f9c837855509fc5af59d8213c1c91d6
|
[
"BSD-3-Clause"
] | 3
|
2015-05-04T03:20:09.000Z
|
2016-02-19T10:35:35.000Z
|
feaas/runners/__init__.py
|
tsuru/varnishapi
|
d63a8c8c5f9c837855509fc5af59d8213c1c91d6
|
[
"BSD-3-Clause"
] | 3
|
2015-01-02T13:18:56.000Z
|
2021-02-08T20:17:14.000Z
|
feaas/runners/__init__.py
|
tsuru/varnishapi
|
d63a8c8c5f9c837855509fc5af59d8213c1c91d6
|
[
"BSD-3-Clause"
] | 5
|
2015-01-02T13:11:45.000Z
|
2016-08-26T06:14:35.000Z
|
# Copyright 2014 varnishapi authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import time
from feaas import storage
class Base(object):
def __init__(self, manager, interval, *locks):
self.manager = manager
self.storage = manager.storage
self.interval = interval
def init_locker(self, *lock_names):
self.locker = storage.MultiLocker(self.storage)
for lock_name in lock_names:
self.locker.init(lock_name)
def loop(self):
self.running = True
while self.running:
self.run()
time.sleep(self.interval)
def stop(self):
self.running = False
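Base deliberately leaves run() undefined: loop() calls self.run() every interval seconds until stop() flips the flag. A hedged sketch of the expected subclass shape (the HealthChecker name and body are invented for illustration):

class HealthChecker(Base):
    """Hypothetical runner; Base.loop() will invoke run() repeatedly."""
    def run(self):
        # a real runner would use self.manager / self.storage here
        print("checking instances...")

# checker = HealthChecker(manager, 10)
# checker.loop()  # blocks, calling run() every 10 seconds until checker.stop()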
| 24.633333
| 57
| 0.649526
| 96
| 739
| 4.90625
| 0.5625
| 0.070064
| 0.055202
| 0.080679
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007421
| 0.270636
| 739
| 29
| 58
| 25.482759
| 0.866419
| 0.207037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.111111
| 0
| 0.388889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0ae880533e14de2255d5554b8a0bb6b7cbc5e1bb
| 1,089
|
py
|
Python
|
Assignment 1 n 2 Day 8.py
|
paju3125/LetsUpgrade-Python-B7
|
c5767361f60f1ec405ab235af85035e2bb9a71e3
|
[
"Apache-2.0"
] | null | null | null |
Assignment 1 n 2 Day 8.py
|
paju3125/LetsUpgrade-Python-B7
|
c5767361f60f1ec405ab235af85035e2bb9a71e3
|
[
"Apache-2.0"
] | null | null | null |
Assignment 1 n 2 Day 8.py
|
paju3125/LetsUpgrade-Python-B7
|
c5767361f60f1ec405ab235af85035e2bb9a71e3
|
[
"Apache-2.0"
] | null | null | null |
# Assignment 1 Day 8
# write a decorator function that takes the input for
# any kind of function you want to build
def getInput(calculate_arg_fuc):
def wrap_function():
print("Enter two numbers ")
a=int(input("Enter first number = "))
b=int(input("Enter second number = "))
calculate_arg_fuc(a,b)
return wrap_function
@getInput
def addition(num1,num2):
print("Addition = ",num1+num2)
@getInput
def subtraction(num1,num2):
print("Subtraction = ",num1-num2)
@getInput
def multiplication(num1,num2):
print("Multiplication = ",num1*num2)
@getInput
def division(num1,num2):
print("Division = ",num1/num2)
addition()
subtraction()
multiplication()
division()
# Assignment 2 day 8
# you need to develop a python program to open a file in read-only mode,
# try writing something to it, and handle the subsequent error using exception handling
try:
    f = open("abc.txt", "r")
    f.write("Heyy, i am prajval")
    f.close()
except Exception:
    print("File is in read only mode...")
| 22.22449
| 85
| 0.651974
| 146
| 1,089
| 4.821918
| 0.493151
| 0.090909
| 0.073864
| 0.080966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024067
| 0.236915
| 1,089
| 48
| 86
| 22.6875
| 0.823105
| 0.264463
| 0
| 0.137931
| 0
| 0
| 0.226542
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.206897
| false
| 0
| 0
| 0
| 0.241379
| 0.206897
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0aea8c17200ee38f7b989cd3fe4ee1c7be72a125
| 4,286
|
py
|
Python
|
modox/chan_modifier.py
|
lukpazera/modox
|
4ee5a6033e405f9f7f3a7c80a1cb3c558c90fb01
|
[
"MIT"
] | 11
|
2021-02-19T17:11:04.000Z
|
2021-12-03T17:14:58.000Z
|
modox/chan_modifier.py
|
lukpazera/modox
|
4ee5a6033e405f9f7f3a7c80a1cb3c558c90fb01
|
[
"MIT"
] | null | null | null |
modox/chan_modifier.py
|
lukpazera/modox
|
4ee5a6033e405f9f7f3a7c80a1cb3c558c90fb01
|
[
"MIT"
] | null | null | null |
import lx
import modo
import select
import item
from run import run
class ChannelModifierUtils(object):
@classmethod
def attachModifierToItem(cls, modifierModoItem, hostModoItem):
"""
Allows for attaching modifier to locator type item.
Attached item will show up under the locator item in item list
(you can unfold it with a little plus icons next to item name in item list).
Attached modifiers are getting deleted together with locator they are attached to.
Parameters
----------
modifierModoItem : modo.Item
Modifier item that you want to attach.
hostModoItem : modo.Item
Locator type item you want to attach modifier to.
"""
item.ItemUtils.addForwardGraphConnections(modifierModoItem, hostModoItem, 'chanMods')
class TransformConstraintOperation(object):
POSITION = 'pos'
ROTATION = 'rot'
SCALE = 'scl'
class CMTransformConstraint(object):
"""
This class represents Transform Constraint channel modifier.
Parameters
----------
modoItem : modo.Item
The constraint modo item.
"""
Operation = TransformConstraintOperation
@classmethod
def new(cls, assemblyItem, hostItem, name='TransformConstraint'):
"""
Adds new transform constraint to the scene.
Parameters
----------
assemblyItem : modo.Item
This is assembly item to which the constraint will be added.
Passing this item is mandatory. However, if you don't want to add constraints
to any assembly pass an item that is not a group.
This doesn't throw an error and it doesn't add constraint to any groups either.
hostItem : modo.Item
Constraint can be attached to an item such that it'll be under this item
in item list. It'll also get deleted when the host item is deleted.
name : str
Name for new constraint item.
Returns
-------
CMTransformConstraint
"""
itemSelection = select.ItemSelection()
itemSelection.clear()
run('modifier.create "cmTransformConstraint:rot" item:{%s} insert:false' % assemblyItem.id)
cnsItem = itemSelection.getOfTypeModo("cmTransformConstraint")[0]
cnsItem.name = name
ChannelModifierUtils.attachModifierToItem(cnsItem, hostItem)
return CMTransformConstraint(cnsItem)
@property
def operation(self):
"""
Gets the type of the constraint.
Returns
-------
str
One of TransformConstraintOperation constants.
"""
return self._item.channel('operation').get()
@property
def inputChannel(self):
return self._item.channel('matrixInput')
@property
def outputChannel(self):
return self._item.channel('matrixOutput')
@property
def isRotationConstraint(self):
"""
Tests if this is rotation constraint.
Returns
-------
bool
"""
return self.operation == self.Operation.ROTATION
@property
def offset(self):
"""
Gets the constraint offset vector.
Returns
-------
modo.Vector3
"""
x = self._item.channel('offset.X').get()
y = self._item.channel('offset.Y').get()
z = self._item.channel('offset.Z').get()
return modo.Vector3(x, y, z)
@offset.setter
def offset(self, offsetVec):
"""
Sets new offset for the constraint.
Parameters
----------
offsetVec : modo.Vector3
"""
self._item.channel('offset.X').set(offsetVec[0], 0.0, key=False, action=lx.symbol.s_ACTIONLAYER_SETUP)
self._item.channel('offset.Y').set(offsetVec[1], 0.0, key=False, action=lx.symbol.s_ACTIONLAYER_SETUP)
self._item.channel('offset.Z').set(offsetVec[2], 0.0, key=False, action=lx.symbol.s_ACTIONLAYER_SETUP)
@property
def modoItem(self):
return self._item
# -------- Private methods
def __init__(self, modoItem):
if modoItem.type != 'cmTransformConstraint':
raise TypeError
self._item = modoItem
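Putting the documented API together, creating and configuring a constraint looks roughly like the helper below; the item arguments and the constraint name are assumptions, while new(), isRotationConstraint, and offset come from the class above:

def add_rotation_offset_constraint(assemblyItem, hostItem):
    """Hypothetical helper following the docstrings above."""
    cns = CMTransformConstraint.new(assemblyItem, hostItem, name='RotCns')
    if cns.isRotationConstraint:
        cns.offset = modo.Vector3(0.0, 0.1, 0.0)  # written on the setup action layer
    return cns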
| 28.573333
| 110
| 0.614326
| 459
| 4,286
| 5.690632
| 0.335512
| 0.033691
| 0.051685
| 0.048239
| 0.116769
| 0.06317
| 0.06317
| 0.06317
| 0.06317
| 0.06317
| 0
| 0.004274
| 0.290247
| 4,286
| 150
| 111
| 28.573333
| 0.854372
| 0.365376
| 0
| 0.148148
| 0
| 0
| 0.100493
| 0.030956
| 0
| 0
| 0
| 0
| 0
| 1
| 0.185185
| false
| 0
| 0.092593
| 0.055556
| 0.537037
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|
0af1a3c68967c05606abe6a22eb2bbc2a17f6f6f
| 1,164
|
py
|
Python
|
tests/serverless/checks/aws/test_AdminPolicyDocument.py
|
peaudecastor/checkov
|
a4804b61c1b1390b7abd44ab53285fcbc3e7e80b
|
[
"Apache-2.0"
] | null | null | null |
tests/serverless/checks/aws/test_AdminPolicyDocument.py
|
peaudecastor/checkov
|
a4804b61c1b1390b7abd44ab53285fcbc3e7e80b
|
[
"Apache-2.0"
] | null | null | null |
tests/serverless/checks/aws/test_AdminPolicyDocument.py
|
peaudecastor/checkov
|
a4804b61c1b1390b7abd44ab53285fcbc3e7e80b
|
[
"Apache-2.0"
] | null | null | null |
import os
import unittest
from checkov.serverless.checks.function.aws.AdminPolicyDocument import check
from checkov.serverless.runner import Runner
from checkov.runner_filter import RunnerFilter
class TestAdminPolicyDocument(unittest.TestCase):
def test_summary(self):
runner = Runner()
current_dir = os.path.dirname(os.path.realpath(__file__))
# Used in
os.environ["sneaky_var"] = "*"
test_files_dir = current_dir + "/example_AdminPolicyDocument"
report = runner.run(root_folder=test_files_dir, runner_filter=RunnerFilter(checks=[check.id]))
summary = report.get_summary()
self.assertEqual(summary['passed'], 2,
f"Passed checks: {[fc.file_path for fc in report.passed_checks]}")
self.assertEqual(summary['failed'], 6,
f"Failed checks: {[fc.file_path for fc in report.failed_checks]}")
self.assertEqual(summary['skipped'], 0,
f"Skipped checks: {[fc.file_path for fc in report.skipped_checks]}")
self.assertEqual(summary['parsing_errors'], 0)
if __name__ == '__main__':
unittest.main()
| 36.375
| 102
| 0.668385
| 137
| 1,164
| 5.445255
| 0.40146
| 0.080429
| 0.117962
| 0.064343
| 0.116622
| 0.116622
| 0.116622
| 0.116622
| 0
| 0
| 0
| 0.00441
| 0.22079
| 1,164
| 31
| 103
| 37.548387
| 0.818082
| 0.006014
| 0
| 0
| 0
| 0
| 0.232035
| 0.082251
| 0
| 0
| 0
| 0
| 0.181818
| 1
| 0.045455
| false
| 0.090909
| 0.227273
| 0
| 0.318182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
0af3c3569db12057875193547cf2329c8c03ae92
| 581
|
py
|
Python
|
api/views/stores/att_handler.py
|
cderwin/maps
|
0146260935a749679396022b6d2b1d90b6df2539
|
[
"MIT"
] | null | null | null |
api/views/stores/att_handler.py
|
cderwin/maps
|
0146260935a749679396022b6d2b1d90b6df2539
|
[
"MIT"
] | 7
|
2016-02-09T07:18:48.000Z
|
2016-02-09T07:25:40.000Z
|
api/views/stores/att_handler.py
|
cderwin/maps
|
0146260935a749679396022b6d2b1d90b6df2539
|
[
"MIT"
] | null | null | null |
from .default_handler import StoresHandler
class ATTStoresHandler(StoresHandler):
def handle_request(self, **kwargs):
kwargs.update({'provider': 'att'})
return super(ATTStoresHandler, self).handle_request(**kwargs)
def get_url(self, **kwargs):
lat = float(kwargs.get('lat'))
lon = float(kwargs.get('lon'))
sw_corner = "{0},{1}".format(lat - 1, lon - 1)
ne_corner = "{0},{1}".format(lat + 1, lon + 1)
return self.config[kwargs['provider']]['url'].format(lat=lat, lon=lon, sw_corner=sw_corner, ne_corner=ne_corner)
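get_url builds a fixed 2-degree bounding box around the requested point; a worked example of the corner arithmetic:

lat, lon = 40.7, -74.0
sw_corner = "{0},{1}".format(lat - 1, lon - 1)  # '39.7,-75.0'
ne_corner = "{0},{1}".format(lat + 1, lon + 1)  # '41.7,-73.0'
print(sw_corner, ne_corner)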
| 36.3125
| 120
| 0.636833
| 76
| 581
| 4.736842
| 0.381579
| 0.066667
| 0.077778
| 0.077778
| 0.122222
| 0.122222
| 0.122222
| 0.122222
| 0
| 0
| 0
| 0.017058
| 0.192771
| 581
| 15
| 121
| 38.733333
| 0.750533
| 0
| 0
| 0
| 0
| 0
| 0.072289
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.090909
| 0
| 0.545455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|
0af3eac5180ad01027c97600a407eb3106203f56
| 349
|
py
|
Python
|
pythonProject/MUNDO 2/Desafio 54.py
|
lucasjlgc/Aulas-de-Python-
|
6aaed1c660487a680e9c449210600ccdfa326612
|
[
"MIT"
] | null | null | null |
pythonProject/MUNDO 2/Desafio 54.py
|
lucasjlgc/Aulas-de-Python-
|
6aaed1c660487a680e9c449210600ccdfa326612
|
[
"MIT"
] | 1
|
2021-06-25T15:29:11.000Z
|
2021-06-25T15:29:11.000Z
|
pythonProject/MUNDO 2/Desafio 54.py
|
lucasjlgc/Aulas-de-Python-
|
6aaed1c660487a680e9c449210600ccdfa326612
|
[
"MIT"
] | null | null | null |
# Read the birth year of 7 people and show how many have already reached
# adulthood (18+) and how many have not
maiores = 0
menores = 0
for c in range(1, 8):
    p = int(input('Qual o ano de seu nascimento? '))
    a = 2021 - p
    if a >= 18:
        maiores += 1
        print('A pessoa numero {} já é maior de idade'.format(c))
    else:
        menores += 1
        print('A pessoa numero {} não é maior de idade!'.format(c))
print('{} pessoas já atingiram a maioridade e {} ainda não.'.format(maiores, menores))
| 29.083333
| 101
| 0.638968
| 63
| 349
| 3.539683
| 0.619048
| 0.035874
| 0.053812
| 0.161435
| 0.179372
| 0.179372
| 0
| 0
| 0
| 0
| 0
| 0.034483
| 0.252149
| 349
| 11
| 102
| 31.727273
| 0.819923
| 0.286533
| 0
| 0
| 0
| 0
| 0.435484
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.285714
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0af766c917854c90cf7eae087d9105162f3eb248
| 8,667
|
py
|
Python
|
py/testdir_multi_jvm/test_many_fp_formats_libsvm_2.py
|
vkuznet/h2o
|
e08f7014f228cbaecfb21f57379970e6a3ac0756
|
[
"Apache-2.0"
] | null | null | null |
py/testdir_multi_jvm/test_many_fp_formats_libsvm_2.py
|
vkuznet/h2o
|
e08f7014f228cbaecfb21f57379970e6a3ac0756
|
[
"Apache-2.0"
] | null | null | null |
py/testdir_multi_jvm/test_many_fp_formats_libsvm_2.py
|
vkuznet/h2o
|
e08f7014f228cbaecfb21f57379970e6a3ac0756
|
[
"Apache-2.0"
] | null | null | null |
import unittest, random, sys, time
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_browse as h2b, h2o_import as h2i, h2o_exec as h2e, h2o_glm
import h2o_util
zeroList = [
'Result0 = 0',
]
# the first column should use this
exprList = [
'Result<n> = sum(<keyX>[<col1>])',
]
DO_SUMMARY = False
DO_COMPARE_SUM = False
def write_syn_dataset(csvPathname, rowCount, colCount, SEEDPERFILE, sel, distribution):
# we can do all sorts of methods off the r object
r = random.Random(SEEDPERFILE)
def addRandValToRowStuff(colNumber, valMin, valMax, rowData, synColSumDict):
# colNumber should not be 0, because the output will be there
## val = r.uniform(MIN,MAX)
val = r.triangular(valMin,valMax,0)
valFormatted = h2o_util.fp_format(val, sel)
# force it to be zero in this range. so we don't print zeroes for svm!
if (val > valMin/2) and (val < valMax/2):
return None
else:
rowData.append(str(colNumber) + ":" + valFormatted) # f should always return string
if colNumber in synColSumDict:
synColSumDict[colNumber] += val # sum of column (dict)
else:
synColSumDict[colNumber] = val # sum of column (dict)
return val
valMin = -1e2
valMax = 1e2
classMin = -36
classMax = 36
dsf = open(csvPathname, "w+")
synColSumDict = {0: 0} # guaranteed to have col 0 for output
# even though we try to get a max colCount with random, we might fall short
# track what max we really got
colNumberMax = 0
for i in range(rowCount):
rowData = []
d = random.randint(0,2)
if d==0:
if distribution == 'sparse':
# only one value per row!
# is it okay to specify col 0 in svm? where does the output data go? (col 0)
colNumber = random.randint(1, colCount)
val = addRandValToRowStuff(colNumber, valMin, valMax, rowData, synColSumDict)
# did we add a val?
if val and (colNumber > colNumberMax):
colNumberMax = colNumber
else:
# some number of values per row.. 50% or so?
for colNumber in range(1, colCount+1):
val = addRandValToRowStuff(colNumber, valMin, valMax, rowData, synColSumDict)
if val and (colNumber > colNumberMax):
colNumberMax = colNumber
# always need an output class, even if no cols are non-zero
# space is the only valid separator
# add the output (col 0)
# random integer for class
val = random.randint(classMin,classMax)
rowData.insert(0, val)
synColSumDict[0] += val # sum of column (dict)
rowDataCsv = " ".join(map(str,rowData))
# FIX! vary the eol ?
        # rows could be randomly skipped here; currently every row is written
dsf.write(rowDataCsv + "\n")
dsf.close()
return (colNumberMax, synColSumDict)
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED, localhost
SEED = h2o.setup_random_seed()
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(2,java_heap_GB=5)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_many_fp_formats_libsvm_2(self):
# h2b.browseTheCloud()
SYNDATASETS_DIR = h2o.make_syn_dir()
tryList = [
(100, 10000, 'cA', 300, 'sparse50'),
(100, 10000, 'cB', 300, 'sparse'),
# (100, 40000, 'cC', 300, 'sparse50'),
# (100, 40000, 'cD', 300, 'sparse'),
]
# h2b.browseTheCloud()
for (rowCount, colCount, hex_key, timeoutSecs, distribution) in tryList:
NUM_CASES = h2o_util.fp_format()
for sel in [random.randint(0,NUM_CASES-1)]: # len(caseList)
SEEDPERFILE = random.randint(0, sys.maxint)
csvFilename = "syn_%s_%s_%s_%s.csv" % (SEEDPERFILE, sel, rowCount, colCount)
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
print "Creating random", csvPathname
# dict of col sums for comparison to exec col sums below
(colNumberMax, synColSumDict) = write_syn_dataset(csvPathname, rowCount, colCount, SEEDPERFILE, sel, distribution)
selKey2 = hex_key + "_" + str(sel)
print "This dataset requires telling h2o parse it's a libsvm..doesn't detect automatically"
parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=selKey2,
timeoutSecs=timeoutSecs, doSummary=False, parser_type='SVMLight')
print csvFilename, 'parse time:', parseResult['response']['time']
print "Parse result['destination_key']:", parseResult['destination_key']
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'], max_column_display=colNumberMax+1, timeoutSecs=timeoutSecs)
num_cols = inspect['num_cols']
num_rows = inspect['num_rows']
print "\n" + csvFilename
# SUMMARY****************************************
# gives us some reporting on missing values, constant values,
# to see if we have x specified well
# figures out everything from parseResult['destination_key']
# needs y to avoid output column (which can be index or name)
                # assume all the configs have the same y..just check with the first one
goodX = h2o_glm.goodXFromColumnInfo(y=0,
key=parseResult['destination_key'], timeoutSecs=300, noPrint=True)
if DO_SUMMARY:
summaryResult = h2o_cmd.runSummary(key=selKey2, max_column_display=colNumberMax+1, timeoutSecs=timeoutSecs)
h2o_cmd.infoFromSummary(summaryResult, noPrint=True)
self.assertEqual(colNumberMax+1, num_cols, msg="generated %s cols (including output). parsed to %s cols" % (colNumberMax+1, num_cols))
# Exec (column sums)*************************************************
if DO_COMPARE_SUM:
h2e.exec_zero_list(zeroList)
colResultList = h2e.exec_expr_list_across_cols(None, exprList, selKey2, maxCol=colNumberMax+1,
timeoutSecs=timeoutSecs)
print "\n*************"
print "colResultList", colResultList
print "*************"
self.assertEqual(rowCount, num_rows, msg="generated %s rows, parsed to %s rows" % (rowCount, num_rows))
# need to fix this for compare to expected
# we should be able to keep the list of fp sums per col above
# when we generate the dataset
### print "\nsynColSumDict:", synColSumDict
for k,v in synColSumDict.iteritems():
if DO_COMPARE_SUM:
# k should be integers that match the number of cols
self.assertTrue(k>=0 and k<len(colResultList))
compare = colResultList[k]
print "\nComparing col sums:", v, compare
# Even though we're comparing floating point sums, the operations probably should have
# been done in same order, so maybe the comparison can be exact (or not!)
self.assertAlmostEqual(v, compare, places=0,
msg='%0.6f col sum is not equal to expected %0.6f' % (v, compare))
synMean = (v + 0.0)/rowCount
# enums don't have mean, but we're not enums
mean = float(inspect['cols'][k]['mean'])
# our fp formats in the syn generation sometimes only have two places?
self.assertAlmostEqual(mean, synMean, places=0,
msg='col %s mean %0.6f is not equal to generated mean %0.6f' % (k, mean, synMean))
num_missing_values = inspect['cols'][k]['num_missing_values']
self.assertEqual(0, num_missing_values,
msg='col %s num_missing_values %d should be 0' % (k, num_missing_values))
if __name__ == '__main__':
h2o.unit_main()
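write_syn_dataset emits SVMLight rows: the integer class label first, then space-separated col:value pairs for only the columns whose values fall outside (valMin/2, valMax/2). An illustrative row (values invented):

example_row = "7 3:84.133 12:-97.5"  # class 7; nonzero features in columns 3 and 12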
| 45.376963
| 151
| 0.572055
| 989
| 8,667
| 4.905966
| 0.336704
| 0.016076
| 0.016488
| 0.02535
| 0.128813
| 0.125103
| 0.112531
| 0.02803
| 0.02803
| 0
| 0
| 0.025137
| 0.325257
| 8,667
| 190
| 152
| 45.615789
| 0.804549
| 0.219222
| 0
| 0.113821
| 0
| 0
| 0.097485
| 0.00387
| 0
| 0
| 0
| 0
| 0.04878
| 0
| null | null | 0
| 0.03252
| null | null | 0.073171
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0af8af43646ac075b324487dffc3942d97354220
| 1,145
|
py
|
Python
|
examples/rpc_server_side.py
|
calendar42/SleekXMPP--XEP-0080-
|
d7bd5fd29f26a5d7de872a49ff63a353b8043e49
|
[
"BSD-3-Clause"
] | 1
|
2016-10-24T05:30:25.000Z
|
2016-10-24T05:30:25.000Z
|
examples/rpc_server_side.py
|
vijayp/SleekXMPP
|
b2e7f57334d27f140f079213c2016615b7168742
|
[
"BSD-3-Clause"
] | null | null | null |
examples/rpc_server_side.py
|
vijayp/SleekXMPP
|
b2e7f57334d27f140f079213c2016615b7168742
|
[
"BSD-3-Clause"
] | null | null | null |
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2011 Dann Martens
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.plugins.xep_0009.remote import Endpoint, remote, Remote, \
ANY_ALL
import threading
class Thermostat(Endpoint):
def FQN(self):
return 'thermostat'
    def __init__(self, initial_temperature):
self._temperature = initial_temperature
self._event = threading.Event()
@remote
def set_temperature(self, temperature):
print("Setting temperature to %s" % temperature)
self._temperature = temperature
@remote
def get_temperature(self):
return self._temperature
@remote(False)
def release(self):
self._event.set()
def wait_for_release(self):
self._event.wait()
def main():
session = Remote.new_session('[email protected]/rpc', '*****')
thermostat = session.new_handler(ANY_ALL, Thermostat, 18)
thermostat.wait_for_release()
session.close()
if __name__ == '__main__':
main()
| 22.019231
| 73
| 0.627074
| 125
| 1,145
| 5.512
| 0.472
| 0.108853
| 0.113208
| 0.058055
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012063
| 0.275983
| 1,145
| 52
| 74
| 22.019231
| 0.819059
| 0.125764
| 0
| 0.071429
| 0
| 0
| 0.067485
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.071429
| 0.071429
| 0.428571
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0afb2dc8c2daf11d9a82ca819aeffdafacc6c971
| 2,515
|
py
|
Python
|
graph.py
|
VaniSHadow/tpGenerator
|
2a2e0a65df48c812d9fa2e2b1474573c6a6ab6c0
|
[
"Unlicense"
] | null | null | null |
graph.py
|
VaniSHadow/tpGenerator
|
2a2e0a65df48c812d9fa2e2b1474573c6a6ab6c0
|
[
"Unlicense"
] | null | null | null |
graph.py
|
VaniSHadow/tpGenerator
|
2a2e0a65df48c812d9fa2e2b1474573c6a6ab6c0
|
[
"Unlicense"
] | null | null | null |
import random
import numpy
import copy

class Graph:
    """n is the number of vertices in the graph; m is the number of edges."""
    def __init__(self, n, m, edge_weight=1, directed=True, connected='weak', loop=False, weighted=False, trim=True):
        """
        n            number of vertices in the graph
        m            number of edges in the graph
        edge_weight  upper bound for edge weights
        directed     whether the graph is directed
        connected    connectivity ('weak' for weakly connected)
        loop         whether cycles are allowed
        weighted     whether the graph is weighted
        trim         True: vertices are numbered from 1; False: numbered from 0
        """
        self.directed = directed
        self.weighted = weighted
        self.connected = connected
        self.loop = loop
        self.trim = trim
        self.edge_weight = edge_weight
        if directed and connected == 'weak' and not loop:  # weakly connected, directed, acyclic
            self.n = n
            self.m = m
            self.matr = numpy.zeros((n, n))
            self.topo = list(range(n))
            random.shuffle(self.topo)
            self.RandomGenerTopoEdges(m - (n - 1))
            weak_connected = self.CheckWeakConnectivity()
            if weak_connected:
                self.RandomGenerTopoEdges(n - 1)
            else:
                count = 0
                for i in range(n - 1):
                    if self.matr[self.topo[i]][self.topo[i + 1]] != 1:
                        self.matr[self.topo[i]][self.topo[i + 1]] = 1
                        count = count + 1
                self.RandomGenerTopoEdges(n - 1 - count)
            self.edges = list()
            for i in range(n):
                for j in range(n):
                    if self.matr[i][j] == 1:
                        e = (i, j)
                        self.edges.append(e)

    def CheckWeakConnectivity(self):
        """Checks the weak connectivity of the graph."""
        temp = copy.deepcopy(self.matr)
        for i in range(self.n):
            for j in range(self.n):
                if temp[i][j] == 1:
                    temp[j][i] = 1
                elif temp[j][i] == 1:
                    temp[i][j] = 1
        for i in range(self.n - 1):
            if i == 0:
                result = temp.dot(temp)
            else:
                result = result.dot(temp)
        for i in range(self.n):
            for j in range(self.n):
                if result[i][j] == 0 and i != j:
                    return False
        return True

    def RandomGenerTopoEdges(self, edge_num):
        """Randomly generates edge_num edges in the graph."""
        for i in range(edge_num):
            mid = random.randint(1, self.n - 2)
            st = random.randint(0, mid)
            end = random.randint(mid + 1, self.n - 1)
            while self.matr[self.topo[st]][self.topo[end]] != 0:
                mid = random.randint(1, self.n - 2)
                st = random.randint(0, mid)
                end = random.randint(mid + 1, self.n - 1)
            self.matr[self.topo[st]][self.topo[end]] = 1

    def GetEdge(self, i):
        """Returns the information of the i-th edge as a string."""
        if self.trim:  # vertices numbered from 1
            if self.weighted == False:
                return str(self.edges[i][0] + 1) + " " + str(self.edges[i][1] + 1)
            else:
                return str(self.edges[i][0] + 1) + " " + str(self.edges[i][1] + 1) + " " + str(random.randint(1, self.edge_weight))
        else:  # vertices numbered from 0
            if self.weighted == False:
                return str(self.edges[i][0]) + " " + str(self.edges[i][1])
            else:
                return str(self.edges[i][0]) + " " + str(self.edges[i][1]) + " " + str(random.randint(1, self.edge_weight))
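A short usage sketch of the generator above (the parameter values are invented): build a small weakly connected directed acyclic graph and print its edges.

g = Graph(6, 9)  # 6 vertices, 9 edges; defaults: directed, weakly connected, no cycles
for i in range(len(g.edges)):
    print(g.GetEdge(i))  # "u v" pairs, numbered from 1 because trim=True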
| 27.043011
| 113
| 0.622664
| 407
| 2,515
| 3.815725
| 0.184275
| 0.035415
| 0.061816
| 0.066967
| 0.388281
| 0.365744
| 0.324533
| 0.324533
| 0.282035
| 0.282035
| 0
| 0.024549
| 0.206362
| 2,515
| 92
| 114
| 27.336957
| 0.753507
| 0.006759
| 0
| 0.236111
| 0
| 0
| 0.005314
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.041667
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0afbde7fb6ef3a1d965ab24316c2720252ada994
| 970
|
py
|
Python
|
csv2googlesheets/to_google_sheets.py
|
AlexSkrn/csv2googlesheets
|
71656dcc6827b1c58ffe80bc55aa6f1ee816f216
|
[
"MIT"
] | null | null | null |
csv2googlesheets/to_google_sheets.py
|
AlexSkrn/csv2googlesheets
|
71656dcc6827b1c58ffe80bc55aa6f1ee816f216
|
[
"MIT"
] | null | null | null |
csv2googlesheets/to_google_sheets.py
|
AlexSkrn/csv2googlesheets
|
71656dcc6827b1c58ffe80bc55aa6f1ee816f216
|
[
"MIT"
] | null | null | null |
"""This module provides a console interface to convert CSV to Google Sheets."""
from csv2googlesheets.gapi_authorization import auth_with_google
from csv2googlesheets.gapi_create_sheet import create_sheet
from csv2googlesheets.gapi_write_to_sheet import write_to_sheet
from csv2googlesheets.parse_file import build_spreadsheet_title
from csv2googlesheets.parse_file import parse_file
from csv2googlesheets.parse_cli_args import parse_cli_args
def main():
"""Control the flow of operations to write data from csv to G Sheets."""
cli_args = parse_cli_args()
values = parse_file(path=cli_args.csv)
spreadsheet_title = build_spreadsheet_title(cli_args.csv)
google_service = auth_with_google(path_creds=cli_args.credentials_json)
spreadsheet_id = create_sheet(google_service, spreadsheet_title)
write_to_sheet(
google_service,
sheet_id=spreadsheet_id,
values=values,
)
if __name__ == '__main__':
main()
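End to end this is meant to be driven from the command line; the exact flag spellings live in parse_cli_args (not shown here), so the invocation below is an assumption based on the cli_args.csv and cli_args.credentials_json attributes:

# Hypothetical invocation:
#   python -m csv2googlesheets.to_google_sheets --csv data.csv --credentials_json credentials.json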
| 32.333333
| 79
| 0.786598
| 131
| 970
| 5.427481
| 0.351145
| 0.068917
| 0.101266
| 0.081575
| 0.098453
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007308
| 0.153608
| 970
| 29
| 80
| 33.448276
| 0.858709
| 0.14433
| 0
| 0
| 0
| 0
| 0.009768
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.315789
| 0
| 0.368421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
0afd7a5b152406bcaea034f10b6d1b88302e3d68
| 434
|
py
|
Python
|
web/snowflake.py
|
jphacks/C_2118
|
a63279e92362e09d1856e3d44edb4793d370fd7a
|
[
"MIT"
] | null | null | null |
web/snowflake.py
|
jphacks/C_2118
|
a63279e92362e09d1856e3d44edb4793d370fd7a
|
[
"MIT"
] | 5
|
2021-10-30T00:55:45.000Z
|
2021-10-30T04:23:36.000Z
|
web/snowflake.py
|
jphacks/C_2118
|
a63279e92362e09d1856e3d44edb4793d370fd7a
|
[
"MIT"
] | null | null | null |
import time

class Snowflake:
    """Generates roughly time-ordered unique IDs: 41 bits of millisecond
    timestamp, 10 bits of machine id, and 12 bits of serial number."""
    def __init__(self, init_serial_no=0):
        self.machine_id = 0
        self.epoch = 0  # custom epoch offset, in milliseconds
        self.serial_no = init_serial_no

    def generate(self):
        unique_id = (
            # 41-bit millisecond timestamp relative to the custom epoch
            ((int(time.time() * 1000) - self.epoch) & 0x1FFFFFFFFFF) << 22
            # 10-bit machine id
            | (self.machine_id & 0x3FF) << 12
            # 12-bit per-machine sequence number
            | (self.serial_no & 0xFFF)
        )
        self.serial_no += 1
        return unique_id
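The bit layout can be inverted with the same shifts and masks; a small decoding sketch (the decode helper is illustrative, not part of the class):

def decode(unique_id):
    """Split a generated ID back into (ms since epoch, machine id, serial)."""
    return ((unique_id >> 22) & 0x1FFFFFFFFFF,
            (unique_id >> 12) & 0x3FF,
            unique_id & 0xFFF)

sf = Snowflake()
ts_ms, machine, serial = decode(sf.generate())
print(ts_ms, machine, serial)  # serial starts at init_serial_no and then increments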
| 24.111111
| 74
| 0.546083
| 53
| 434
| 4.188679
| 0.45283
| 0.18018
| 0.162162
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059649
| 0.343318
| 434
| 17
| 75
| 25.529412
| 0.719298
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0.052995
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.071429
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
e40169279b6d0abaccc4f8f3610827c98bbcceff
| 6,197
|
py
|
Python
|
Overview/11 - funktsioonid.py
|
priidupaomets/python_kursus
|
731ab386ca40c321288659db21db23912ca7f8dd
|
[
"MIT"
] | 1
|
2021-02-19T15:21:28.000Z
|
2021-02-19T15:21:28.000Z
|
Overview/11 - funktsioonid.py
|
priidupaomets/python_kursus
|
731ab386ca40c321288659db21db23912ca7f8dd
|
[
"MIT"
] | null | null | null |
Overview/11 - funktsioonid.py
|
priidupaomets/python_kursus
|
731ab386ca40c321288659db21db23912ca7f8dd
|
[
"MIT"
] | 1
|
2018-03-24T11:01:46.000Z
|
2018-03-24T11:01:46.000Z
|
"""
funktsioonid.py
Funktsioonide ja protseduuride kasutamine
"""
#
# Protseduur
#
def minu_funktsioon():
print("See on protseduur")
# Kutsume funktsiooni vรคlja
minu_funktsioon()
#
# Funktsioon
#
def liida(num1, num2):
return num1 + num2
sum = liida(3, 5)
print(sum)
# Nรคide vaikevรครคrtuste kasutamisest
# def funk(arg1 = vรครคrtus1, arg2 = vรครคrtus2)
# pass
def funk(arg1 = 0, arg2 = "Test"):
print(arg1, arg2)
funk() # Kutsume funktsiooni vรคlja ilma argumente kaasa andmata
#
# Algarvude leidmine
#
def isprime(n):
if n <= 1:
return False
for i in range(2, n):
if n % i == 0:
return False
else:
return True
# Kustume funktsiooni testimiseks vรคlja
n = 5
if isprime(n):
print(f"{n} ON algarv") # Kasutame f-formaatimisstringi, mis lubab muutuja otse stringi sisse panna
else:
print(f"{n} EI OLE algarv")
def list_primes(max_num = 100):
for n in range(2, max_num):
if isprime(n):
print(n, end = ' ', flush = True)
print()
list_primes()
#
# Muutuva arvu argumentidega funktsioonid
#
# Lisame lihtsalt uusi argumente
def summa(num1, num2, num3):
return num1 + num2 + num3
print(summa(1, 2, 3)) # Tรถรถtab
print(summa(1, 2)) # Saame vea, kuna uus funktsioon nรตuab 3 argumenti
# Katsetame funktsiooni รผlelaadimist (function overloading vรตi method overloading)
def summa(num1, num2):
return num1 + num2
def summa(num1, num2, num3):
return num1 + num2 + num3
print(summa(1, 2)) # Saame vea, kuna viimane def kirjutab eelmise รผle
print(summa(1, 2, 3))
# Katsetame vaikevรครคrtustega funktsioone
def summa(num1, num2, num3 = 0, num4 = 0):
return num1 + num2 + num3 + num4
print(summa(1, 2))
print(summa(1, 2, 3))
print(summa(1, 2, 3, 4))
#print(summa(1, 2, 3, 4, 5)) # Selle tรถรถle saamiseks peame f-ni muutma
def keskmine(num1, num2, num3 = 0, num4 = 0):
sum = num1 + num2 + num3 + num4 # Sama, mis summa(num1, num2, num3, num4)
argumente = 4.0
return sum / argumente
print(keskmine(1, 2)) # Ilmselgelt vale tulemus (1.5 asemel 0.75)
print(keskmine(1, 2, 3)) # Ka vale tulemus (2 asemel 1.5)
print(keskmine(1, 2, 3, 4)) # รige tulemus
# Tรคiendame argumentide arvu leidmist
def keskmine(num1, num2, num3 = 0, num4 = 0):
sum = num1 + num2 + num3 + num4 # Sama, mis summa(num1, num2, num3, num4)
argumente = 2.0 # Minimaalselt 2
if num3 > 0:
argumente = argumente + 1
if num4 > 0:
argumente = argumente + 1
return sum / argumente
print(keskmine(1, 2)) # รige tulemus
print(keskmine(1, 2, 3)) # รige tulemus
print(keskmine(1, 2, 3, 4)) # รige tulemus
print(keskmine(1, 2, 3, 0)) # Vale tulemus!
print(keskmine(1, 0, 3, 2)) # รige tulemus!?! Kuidas see nรผรผd รตige on - kas tulemus sรตltub argumentide jรคrjekorrast?
# Kasutame teistsugust vaikevรครคrtust
def keskmine(num1, num2, num3 = None, num4 = None):
sum = num1 + num2 # Ei saa kohe 4 arg'i kokku liita
argumente = 2.0 # Minimaalselt 2
if num3 is not None:
argumente += 1
sum = sum + num3
if num4 is not None:
argumente += 1
sum = sum + num4
return sum / argumente
print(keskmine(1, 2)) # รige tulemus
print(keskmine(1, 2, 3)) # รige tulemus
print(keskmine(1, 2, 3, 4)) # รige tulemus
print(keskmine(1, 2, 3, 0)) # รige tulemus!
print(keskmine(1, 0, 3, 2)) # รige tulemus
# Proovime listiga argumente defineerida
def summa(numbrid=[]):
sum = 0
for num in numbrid:
sum += num
return sum
#print(summa(1)) # Ei tรถรถta, kuna pole itereeritav tรผรผp
#print(summa(1, 2)) # Ei tรถรถta, kuna pole massiiv
arvud=[1, 2]
print(summa(arvud))
arvud=[1, 2, 3]
print(summa(arvud))
arvud=[1, 2, 3, 4]
print(summa(arvud))
print(summa([1, 2, 3, 4, 5])) # Vรตime panna ka ilma vahemuutujata
arvud=[1]
print(summa(arvud))
def summa(*numbrid):
sum = 0
for num in numbrid:
sum += num
return sum
print(summa()) # Isegi see variant tรถรถtab
print(summa(1))
print(summa(1, 2))
arvud=[1, 2]
print(summa(*arvud)) # Ka siin tuleb '*' kasutada
arvud=[1, 2, 3]
print(summa(*arvud))
arvud=[1, 2, 3, 4]
print(summa(*arvud))
arvud=[1, 2, 3, 4, 5]
print(summa(*arvud))
arvud=[1]
print(summa(*arvud))
# Erinevat sort argumendid
def argfun(arg1, arg2, *args, kw1 = 1, kw2 = "True"):
print(arg1, arg2, *args, kw1, kw2)
argfun(1, 2, 3, 4, 5, kw1 = 10, kw2 = 12)
def argfun(**kwargs):
for (arg, val) in kwargs.items():
print(f"{arg}={val}", end = ' ')
print()
argfun(kw2 = 10, kw3 = 12, kw4 = 14)
def argfun(arg1, arg2, *args, **kwargs):
print(arg1, arg2, *args)
for (arg, val) in kwargs.items():
print(f"{arg}={val}", end = ' ')
print()
argfun(1, 2, 3, 4, 5, kw2 = 10, kw3 = 12, kw4 = 14)
def argfun(arg1, arg2, *args, kw1 = 1, kw2 = "True", **kwargs):
print(arg1, arg2, *args, kw1, kw2)
for (arg, val) in kwargs.items():
print(f"{arg}={val}", end = ' ')
print()
argfun(1, 2, 3, 4, 5, kw2 = 10, kw3 = 12, kw4 = 14)
# Kuidas garanteerida, et argumentideks on numbrid?
def numsum(*numbrid):
sum = 0
for num in numbrid:
if isinstance(num, int) or isinstance(num, float):
sum += num
return sum
def numcount(*numbrid):
count = 0
for num in numbrid:
if isinstance(num, int) or isinstance(num, float):
count += 1
return count
def numavg(*numbrid):
sum = numsum(*numbrid)
count = numcount(*numbrid)
return sum / (count * 1.0) # Vรตime jagatava teha float tรผรผbiks
print(numsum(1))
print(numsum(1, 2))
print(numsum(1, 2, 3))
print(numsum(1, 2, 3, "4"))
print(numsum(1, None, 3, 4, 5))
print("-"*30)
print(numcount(1))
print(numcount(1, 2))
print(numcount(1, 2, 3))
print(numcount(1, 2, 3, "4"))
print(numcount(1, None, 3, 4, 5))
print("-"*30)
print(numavg(1))
print(numavg(1, 2))
print(numavg(1, 2, 3))
print(numavg(1, 2, 3, "4"))
print(numavg(1, None, 3, 4, 5))
print(numavg()) # Viga! Nulliga jagamine!!!
# Vigade haldamist vaatame peatselt ka lรคhemalt
| 24.01938
| 116
| 0.606745
| 916
| 6,197
| 4.098253
| 0.209607
| 0.021843
| 0.022376
| 0.015983
| 0.51252
| 0.428609
| 0.393713
| 0.333777
| 0.313266
| 0.283165
| 0
| 0.072966
| 0.250282
| 6,197
| 257
| 117
| 24.11284
| 0.735041
| 0.272229
| 0
| 0.563636
| 0
| 0
| 0.022732
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.127273
| false
| 0
| 0
| 0.030303
| 0.224242
| 0.406061
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 1
|
e40722bed82cf8f0cac95ef9146f043dd3dc25ca
| 5,318
|
py
|
Python
|
05-Environments/hw02/hw02/hw02.py
|
ericchen12377/CS61A_LearningDoc
|
31f23962b0e2834795bf61eeb0f4884cc5da1809
|
[
"MIT"
] | 2
|
2020-04-24T18:36:53.000Z
|
2020-04-25T00:15:55.000Z
|
05-Environments/hw02/hw02/hw02.py
|
ericchen12377/CS61A_LearningDoc
|
31f23962b0e2834795bf61eeb0f4884cc5da1809
|
[
"MIT"
] | null | null | null |
05-Environments/hw02/hw02/hw02.py
|
ericchen12377/CS61A_LearningDoc
|
31f23962b0e2834795bf61eeb0f4884cc5da1809
|
[
"MIT"
] | null | null | null |
""" Homework 2: Higher Order Functions"""
HW_SOURCE_FILE = 'hw02.py'
from operator import add, mul, sub
square = lambda x: x * x
identity = lambda x: x
triple = lambda x: 3 * x
increment = lambda x: x + 1
######################
# Required Questions #
######################
def product(n, f):
"""Return the product of the first n terms in a sequence.
n -- a positive integer
f -- a function that takes one argument to produce the term
>>> product(3, identity) # 1 * 2 * 3
6
>>> product(5, identity) # 1 * 2 * 3 * 4 * 5
120
>>> product(3, square) # 1^2 * 2^2 * 3^2
36
>>> product(5, square) # 1^2 * 2^2 * 3^2 * 4^2 * 5^2
14400
>>> product(3, increment) # (1+1) * (2+1) * (3+1)
24
>>> product(3, triple) # 1*3 * 2*3 * 3*3
162
"""
"*** YOUR CODE HERE ***"
result,k = 1,1
while k <= n:
result,k = f(k)*result, k + 1
return result
def accumulate(combiner, base, n, f):
"""Return the result of combining the first n terms in a sequence and base.
The terms to be combined are f(1), f(2), ..., f(n). combiner is a
two-argument commutative, associative function.
>>> accumulate(add, 0, 5, identity) # 0 + 1 + 2 + 3 + 4 + 5
15
>>> accumulate(add, 11, 5, identity) # 11 + 1 + 2 + 3 + 4 + 5
26
>>> accumulate(add, 11, 0, identity) # 11
11
>>> accumulate(add, 11, 3, square) # 11 + 1^2 + 2^2 + 3^2
25
>>> accumulate(mul, 2, 3, square) # 2 * 1^2 * 2^2 * 3^2
72
>>> accumulate(lambda x, y: x + y + 1, 2, 3, square)
19
>>> accumulate(lambda x, y: 2 * (x + y), 2, 3, square)
58
>>> accumulate(lambda x, y: (x + y) % 17, 19, 20, square)
16
"""
"*** YOUR CODE HERE ***"
result, k = base,1
while k <= n:
result, k = combiner(result,f(k)), k + 1
return result
def summation_using_accumulate(n, f):
"""Returns the sum of f(1) + ... + f(n). The implementation
uses accumulate.
>>> summation_using_accumulate(5, square)
55
>>> summation_using_accumulate(5, triple)
45
>>> from construct_check import check
>>> # ban iteration and recursion
>>> check(HW_SOURCE_FILE, 'summation_using_accumulate',
... ['Recursion', 'For', 'While'])
True
"""
"*** YOUR CODE HERE ***"
# result, k = 0, 1
# while k <= n:
# result, k = result + f(k), k + 1
return accumulate(add,0,n,f)
def product_using_accumulate(n, f):
"""An implementation of product using accumulate.
>>> product_using_accumulate(4, square)
576
>>> product_using_accumulate(6, triple)
524880
>>> from construct_check import check
>>> # ban iteration and recursion
>>> check(HW_SOURCE_FILE, 'product_using_accumulate',
... ['Recursion', 'For', 'While'])
True
"""
"*** YOUR CODE HERE ***"
# result, k = 1, 1
# while k <= n:
# result, k = result * f(k), k + 1
return accumulate(mul,1,n,f)
def compose1(h, g):
"""Return a function f, such that f(x) = h(g(x))."""
def f(x):
return h(g(x))
return f
def make_repeater(h, n):
"""Return the function that computes the nth application of h.
>>> add_three = make_repeater(increment, 3)
>>> add_three(5)
8
>>> make_repeater(triple, 5)(1) # 3 * 3 * 3 * 3 * 3 * 1
243
>>> make_repeater(square, 2)(5) # square(square(5))
625
>>> make_repeater(square, 4)(5) # square(square(square(square(5))))
152587890625
>>> make_repeater(square, 0)(5) # Yes, it makes sense to apply the function zero times!
5
"""
"*** YOUR CODE HERE ***"
def repeater(x):
result, k = x,1
while k <= n:
result,k = h(result), k + 1
return result
return repeater
##########################
# Just for fun Questions #
##########################
def zero(f):
return lambda x: x
def successor(n):
return lambda f: lambda x: f(n(f)(x))
def one(f):
"""Church numeral 1: same as successor(zero)"""
"*** YOUR CODE HERE ***"
return lambda x: f(x)
def two(f):
"""Church numeral 2: same as successor(successor(zero))"""
"*** YOUR CODE HERE ***"
return lambda x: f(f(x))
three = successor(two)
def church_to_int(n):
"""Convert the Church numeral n to a Python integer.
>>> church_to_int(zero)
0
>>> church_to_int(one)
1
>>> church_to_int(two)
2
>>> church_to_int(three)
3
"""
"*** YOUR CODE HERE ***"
return n(lambda x: x + 1)(0)
def add_church(m, n):
"""Return the Church numeral for m + n, for Church numerals m and n.
>>> church_to_int(add_church(two, three))
5
"""
"*** YOUR CODE HERE ***"
return lambda f: lambda x: m(f)(n(f)(x))
def mul_church(m, n):
"""Return the Church numeral for m * n, for Church numerals m and n.
>>> four = successor(three)
>>> church_to_int(mul_church(two, three))
6
>>> church_to_int(mul_church(three, four))
12
"""
"*** YOUR CODE HERE ***"
return lambda f: m(n(f))
def pow_church(m, n):
"""Return the Church numeral m ** n, for Church numerals m and n.
>>> church_to_int(pow_church(two, three))
8
>>> church_to_int(pow_church(three, two))
9
"""
"*** YOUR CODE HERE ***"
return n(m)
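
def church_demo():
    """A quick sanity check for the Church numeral helpers above; this demo
    (including the local `four`) is illustrative, not part of the assignment.
    >>> church_demo()
    True
    """
    four = successor(three)
    return (church_to_int(add_church(two, three)) == 5 and
            church_to_int(mul_church(three, four)) == 12 and
            church_to_int(pow_church(two, three)) == 8)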
| 25.690821
| 92
| 0.548326
| 788
| 5,318
| 3.624365
| 0.178934
| 0.031863
| 0.046218
| 0.037815
| 0.382353
| 0.30042
| 0.248599
| 0.212185
| 0.212185
| 0.184874
| 0
| 0.057647
| 0.275856
| 5,318
| 206
| 93
| 25.815534
| 0.683978
| 0.587251
| 0
| 0.288136
| 0
| 0
| 0.150635
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.271186
| false
| 0
| 0.016949
| 0.050847
| 0.559322
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
e409ad0c94dc67812d4ce4eb1f3a9b3b256b6a43
| 638
|
py
|
Python
|
acceptance/test/TestStartStopFeature.py
|
ismacaulay/qtcwatchdog
|
72f3588eef1019bac8788fa58c52722dfa7c4d28
|
[
"MIT"
] | null | null | null |
acceptance/test/TestStartStopFeature.py
|
ismacaulay/qtcwatchdog
|
72f3588eef1019bac8788fa58c52722dfa7c4d28
|
[
"MIT"
] | 12
|
2015-10-22T15:38:28.000Z
|
2016-03-22T18:53:57.000Z
|
acceptance/test/TestStartStopFeature.py
|
ismacaulay/qtcwatchdog
|
72f3588eef1019bac8788fa58c52722dfa7c4d28
|
[
"MIT"
] | null | null | null |
from acceptance.harness.acceptance_test import WatchdogAcceptanceTest
class TestStartStopFeature(WatchdogAcceptanceTest):
def test_willStartObserverWhenWatchdogStarted(self):
self.create_and_start_watchdog()
self.assertTrue(self.fs_observer.running)
def test_willStopObserverWhenWatchdogStopped(self):
self.create_and_start_watchdog()
self.watchdog.stop()
self.assertFalse(self.fs_observer.running)
def test_willJoinObserverThreadWhenWatchdogStopped(self):
self.create_and_start_watchdog()
self.watchdog.stop()
self.assertTrue(self.fs_observer.joined)
| 26.583333
| 69
| 0.761755
| 61
| 638
| 7.704918
| 0.393443
| 0.044681
| 0.089362
| 0.108511
| 0.476596
| 0.404255
| 0.285106
| 0.212766
| 0.212766
| 0.212766
| 0
| 0
| 0.166144
| 638
| 23
| 70
| 27.73913
| 0.883459
| 0
| 0
| 0.384615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 1
| 0.230769
| false
| 0
| 0.076923
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
e409e1ff47556f0c395cedaf6538d4e9082df50c
| 1,243
|
py
|
Python
|
neural_spline_flows/nde/transforms/transform_test.py
|
VincentStimper/nsf
|
6bde505639ebcb67bffa227ea0021e3de235e03d
|
[
"MIT"
] | null | null | null |
neural_spline_flows/nde/transforms/transform_test.py
|
VincentStimper/nsf
|
6bde505639ebcb67bffa227ea0021e3de235e03d
|
[
"MIT"
] | null | null | null |
neural_spline_flows/nde/transforms/transform_test.py
|
VincentStimper/nsf
|
6bde505639ebcb67bffa227ea0021e3de235e03d
|
[
"MIT"
] | null | null | null |
import torch
import torchtestcase
from neural_spline_flows.nde.transforms import base
class TransformTest(torchtestcase.TorchTestCase):
"""Base test for all transforms."""
def assert_tensor_is_good(self, tensor, shape=None):
self.assertIsInstance(tensor, torch.Tensor)
self.assertFalse(torch.isnan(tensor).any())
self.assertFalse(torch.isinf(tensor).any())
if shape is not None:
self.assertEqual(tensor.shape, torch.Size(shape))
def assert_forward_inverse_are_consistent(self, transform, inputs):
inverse = base.InverseTransform(transform)
identity = base.CompositeTransform([inverse, transform])
outputs, logabsdet = identity(inputs)
self.assert_tensor_is_good(outputs, shape=inputs.shape)
self.assert_tensor_is_good(logabsdet, shape=inputs.shape[:1])
self.assertEqual(outputs, inputs)
self.assertEqual(logabsdet, torch.zeros(inputs.shape[:1]))
def assertNotEqual(self, first, second, msg=None):
if ((self._eps and (first - second).abs().max().item() < self._eps) or
(not self._eps and torch.equal(first, second))):
self._fail_with_message(msg, "The tensors are _not_ different!")
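
# A minimal usage sketch (an assumption, not from the original file): concrete
# transform tests subclass TransformTest and reuse its assertion helpers.
# base.CompositeTransform([]) is used here as an identity-like transform.
class IdentityCompositeTest(TransformTest):
    def test_forward_inverse_are_consistent(self):
        transform = base.CompositeTransform([])
        inputs = torch.randn(4, 3)
        self.assert_forward_inverse_are_consistent(transform, inputs)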
| 37.666667
| 78
| 0.693484
| 149
| 1,243
| 5.630872
| 0.416107
| 0.042908
| 0.05006
| 0.064362
| 0.052443
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001992
| 0.192277
| 1,243
| 32
| 79
| 38.84375
| 0.833665
| 0.023331
| 0
| 0
| 0
| 0
| 0.026534
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.136364
| false
| 0
| 0.136364
| 0
| 0.318182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
e40c283a7830ae526fea47bfe3f1719fdb809be3
| 358
|
py
|
Python
|
directory-traversal/validate-file-extension-null-byte-bypass.py
|
brandonaltermatt/penetration-testing-scripts
|
433b5d000a5573e60b9d8e49932cedce74937ebc
|
[
"MIT"
] | null | null | null |
directory-traversal/validate-file-extension-null-byte-bypass.py
|
brandonaltermatt/penetration-testing-scripts
|
433b5d000a5573e60b9d8e49932cedce74937ebc
|
[
"MIT"
] | null | null | null |
directory-traversal/validate-file-extension-null-byte-bypass.py
|
brandonaltermatt/penetration-testing-scripts
|
433b5d000a5573e60b9d8e49932cedce74937ebc
|
[
"MIT"
] | null | null | null |
"""
https://portswigger.net/web-security/file-path-traversal/lab-validate-file-extension-null-byte-bypass
"""
import sys
import requests
site = sys.argv[1]
if site.startswith('https://'):
    # str.lstrip removes a set of characters, not a prefix, so slice instead
    site = site.rstrip('/')[len('https://'):]
url = f'''https://{site}/image?filename=../../../etc/passwd%00.png'''
s = requests.Session()
resp = s.get(url)
print(resp.text)
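
# Example invocation (hypothetical lab host), run from a shell:
#   python validate-file-extension-null-byte-bypass.py \
#       https://YOUR-LAB-ID.web-security-academy.net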
| 21.058824
| 101
| 0.664804
| 52
| 358
| 4.576923
| 0.730769
| 0.067227
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009288
| 0.097765
| 358
| 17
| 102
| 21.058824
| 0.727554
| 0.282123
| 0
| 0
| 0
| 0
| 0.292
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.111111
| 0.222222
| 0
| 0.222222
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
7c0efca532f7042e0db58c5e7fb4f25f0274261b
| 3,437
|
py
|
Python
|
Assignment Day 2 .py
|
ShubhamKahlon57/Letsupgrade-python-Batch-7
|
7989c2d2f17e58dd4ee8f278c37d2c1d18e5e3af
|
[
"Apache-2.0"
] | null | null | null |
Assignment Day 2 .py
|
ShubhamKahlon57/Letsupgrade-python-Batch-7
|
7989c2d2f17e58dd4ee8f278c37d2c1d18e5e3af
|
[
"Apache-2.0"
] | null | null | null |
Assignment Day 2 .py
|
ShubhamKahlon57/Letsupgrade-python-Batch-7
|
7989c2d2f17e58dd4ee8f278c37d2c1d18e5e3af
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
#List and function
# In[6]:
# empty list
my_list = []
# list of integers
my_list = [1, 2, 3]
# list with mixed data types
my_list = [1, "Hello", 3.4]
# In[7]:
# nested list
my_list = ["mouse", [8, 4, 6], ['a']]
# In[11]:
# List indexing
my_list = ['p', 'r', 'o', 'b', 'e']
# Output: p
print(my_list[0])
# Output: o
print(my_list[2])
# Output: e
print(my_list[4])
# Nested List
n_list = ["Happy", [2, 0, 1, 5]]
# Nested indexing
print(n_list[0][1])
print(n_list[1][3])
# Error! Only integers can be used for indexing
# print(my_list[4.0])  # TypeError: list indices must be integers
# In[9]:
# Appending and Extending lists in Python
odd = [1, 3, 5]
odd.append(7)
print(odd)
odd.extend([9, 11, 13])
print(odd)
# In[13]:
# Deleting list items
my_list = ['p', 'r', 'o', 'b', 'l', 'e', 'm']
# delete one item
del my_list[2]
print(my_list)
# delete multiple items
del my_list[1:5]
print(my_list)
# delete entire list
del my_list
# In[14]:
# Appending and Extending lists in Python
odd = [1, 3, 5]
odd.append(7)
print(odd)
odd.extend([9, 11, 13])
print(odd)
# In[15]:
#Dictionary and function
# In[18]:
# empty dictionary
my_dict = {}
# dictionary with integer keys
my_dict = {1: 'apple', 2: 'ball'}
# dictionary with mixed keys
my_dict = {'name': 'John', 1: [2, 4, 3]}
# using dict()
my_dict = dict({1:'apple', 2:'ball'})
# from sequence having each item as a pair
my_dict = dict([(1,'apple'), (2,'ball')])
# In[20]:
# get vs [] for retrieving elements
my_dict = {'name': 'Jack', 'age': 26}
# Output: Jack
print(my_dict['name'])
# Output: 26
print(my_dict.get('age'))
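
# get() also accepts a default instead of returning None for a missing key
# (a small illustrative addition to the notebook):
print(my_dict.get('salary'))     # None
print(my_dict.get('salary', 0))  # 0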
# In[21]:
# Changing and adding Dictionary Elements
my_dict = {'name': 'Jack', 'age': 26}
# update value
my_dict['age'] = 27
#Output: {'age': 27, 'name': 'Jack'}
print(my_dict)
# add item
my_dict['address'] = 'Downtown'
# Output: {'address': 'Downtown', 'age': 27, 'name': 'Jack'}
print(my_dict)
# In[22]:
#Sets and its function
# In[23]:
my_set = {1, 2, 3}
print(my_set)
# In[24]:
my_set = {1.0, "Hello", (1, 2, 3)}
print(my_set)
# In[25]:
# set cannot have duplicates
my_set = {1, 2, 3, 4, 3, 2}
print(my_set)
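
# Illustrative set operations (an addition to the notebook):
A = {1, 2, 3}
B = {3, 4, 5}
print(A | B)  # union: {1, 2, 3, 4, 5}
print(A & B)  # intersection: {3}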
# In[26]:
#Tuple and its method
# In[27]:
# Tuple having integers
my_tuple = (1, 2, 3)
print(my_tuple)
# In[28]:
my_tuple = ("hello")
print(type(my_tuple))
# In[30]:
# Accessing tuple elements using indexing
my_tuple = ('p','e','r','m','i','t')
print(my_tuple[0])
print(my_tuple[5])
# In[31]:
print(my_tuple[-1])
# In[32]:
print(my_tuple[-6])
# In[36]:
# Changing tuple values
my_tuple = (4, 2, 3, [6, 5])
# TypeError: 'tuple' object does not support item assignment
# my_tuple[1] = 9
# However, item of mutable element can be changed
my_tuple[3][0] = 9 # Output: (4, 2, 3, [9, 5])
print(my_tuple)
# Tuples can be reassigned
my_tuple = ('p', 'r', 'o', 'g', 'r', 'a', 'm', 'i', 'z')
# Output: ('p', 'r', 'o', 'g', 'r', 'a', 'm', 'i', 'z')
print(my_tuple)
# In[37]:
#String and its function
# In[38]:
# Python string examples - all assignments are identical.
String_var = 'Python'
String_var = "Python"
String_var = """Python"""
# with Triple quotes Strings can extend to multiple lines
String_var = """ This document will help you to
explore all the concepts
of Python Strings!!! """
# Replace "document" with "tutorial" and store in another variable
substr_var = String_var.replace("document", "tutorial")
print(substr_var)
# In[ ]:
| 12.059649
| 66
| 0.607507
| 573
| 3,437
| 3.541012
| 0.289703
| 0.069
| 0.0414
| 0.016264
| 0.22622
| 0.207491
| 0.175456
| 0.089699
| 0.089699
| 0.080828
| 0
| 0.05208
| 0.195519
| 3,437
| 284
| 67
| 12.102113
| 0.681736
| 0.441082
| 0
| 0.347826
| 0
| 0
| 0.134021
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.405797
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 1
|
7c0f552f843493e2753dc5c4baf8ccf2206f5f32
| 195
|
py
|
Python
|
hackerrank/pickingNumbers.py
|
irvandindaprakoso/online-test-py
|
a7a6cd98ba3e0b74558ecb7e431eb2729077a38a
|
[
"W3C"
] | null | null | null |
hackerrank/pickingNumbers.py
|
irvandindaprakoso/online-test-py
|
a7a6cd98ba3e0b74558ecb7e431eb2729077a38a
|
[
"W3C"
] | null | null | null |
hackerrank/pickingNumbers.py
|
irvandindaprakoso/online-test-py
|
a7a6cd98ba3e0b74558ecb7e431eb2729077a38a
|
[
"W3C"
] | null | null | null |
def pickingNumbers(a):
    # Write your code here
    # Longest subarray in which any two elements differ by at most 1:
    # for each value i, pair its count with the count of i - 1.
    best = 0
    for i in a:
        e = a.count(i) + a.count(i - 1)
        if e > best:
            best = e
    return best
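
if __name__ == '__main__':
    # Illustrative check using the HackerRank sample input; expected output 3
    # (e.g. the picked values [4, 3, 3], which differ by at most 1).
    print(pickingNumbers([4, 6, 5, 3, 3, 1]))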
| 17.727273
| 26
| 0.435897
| 32
| 195
| 2.65625
| 0.59375
| 0.141176
| 0.164706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019048
| 0.461538
| 195
| 10
| 27
| 19.5
| 0.790476
| 0.102564
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 0
| 1
| 0.111111
| false
| 0
| 0
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c120c632a3695672ca8dce5ff251b3540195c6e
| 68,026
|
py
|
Python
|
sandroad.py
|
lancelee82/bluelake
|
3ac3bba191ec5e331dcf66e0a20725445585c316
|
[
"MIT"
] | null | null | null |
sandroad.py
|
lancelee82/bluelake
|
3ac3bba191ec5e331dcf66e0a20725445585c316
|
[
"MIT"
] | null | null | null |
sandroad.py
|
lancelee82/bluelake
|
3ac3bba191ec5e331dcf66e0a20725445585c316
|
[
"MIT"
] | null | null | null |
"""
Flatpath, go forward forever.
http://codeincomplete.com/posts/javascript-racer/
http://www.extentofthejam.com/pseudo/
http://pixel.garoux.net/screen/game_list
Usage:
* UP/DOWN/LEFT/RIGHT
* SPACE : hide/show road map
* TAB : replay this road
* RETURN : go to a new road
TODO:
* hill road
* more road sprites
* sound
"""
import math
import random
import time
from starfish import pygm
from starfish import consts
from starfish import sptdraw
from starfish import utils
IMG_POS_BACKGROUND = {
'HILLS': { 'x': 5, 'y': 5, 'w': 1280, 'h': 480 },
'SKY': { 'x': 5, 'y': 495, 'w': 1280, 'h': 480 },
'TREES': { 'x': 5, 'y': 985, 'w': 1280, 'h': 480 },
}
IMG_POS_SPRITES = {
'PALM_TREE': { 'x': 5, 'y': 5, 'w': 215, 'h': 540 },
'BILLBOARD08': { 'x': 230, 'y': 5, 'w': 385, 'h': 265 },
'TREE1': { 'x': 625, 'y': 5, 'w': 360, 'h': 360 },
'DEAD_TREE1': { 'x': 5, 'y': 555, 'w': 135, 'h': 332 },
'BILLBOARD09': { 'x': 150, 'y': 555, 'w': 328, 'h': 282 },
'BOULDER3': { 'x': 230, 'y': 280, 'w': 320, 'h': 220 },
'COLUMN': { 'x': 995, 'y': 5, 'w': 200, 'h': 315 },
'BILLBOARD01': { 'x': 625, 'y': 375, 'w': 300, 'h': 170 },
'BILLBOARD06': { 'x': 488, 'y': 555, 'w': 298, 'h': 190 },
'BILLBOARD05': { 'x': 5, 'y': 897, 'w': 298, 'h': 190 },
'BILLBOARD07': { 'x': 313, 'y': 897, 'w': 298, 'h': 190 },
'BOULDER2': { 'x': 621, 'y': 897, 'w': 298, 'h': 140 },
'TREE2': { 'x': 1205, 'y': 5, 'w': 282, 'h': 295 },
'BILLBOARD04': { 'x': 1205, 'y': 310, 'w': 268, 'h': 170 },
'DEAD_TREE2': { 'x': 1205, 'y': 490, 'w': 150, 'h': 260 },
'BOULDER1': { 'x': 1205, 'y': 760, 'w': 168, 'h': 248 },
'BUSH1': { 'x': 5, 'y': 1097, 'w': 240, 'h': 155 },
'CACTUS': { 'x': 929, 'y': 897, 'w': 235, 'h': 118 },
'BUSH2': { 'x': 255, 'y': 1097, 'w': 232, 'h': 152 },
'BILLBOARD03': { 'x': 5, 'y': 1262, 'w': 230, 'h': 220 },
'BILLBOARD02': { 'x': 245, 'y': 1262, 'w': 215, 'h': 220 },
'STUMP': { 'x': 995, 'y': 330, 'w': 195, 'h': 140 },
'SEMI': { 'x': 1365, 'y': 490, 'w': 122, 'h': 144 },
'TRUCK': { 'x': 1365, 'y': 644, 'w': 100, 'h': 78 },
'CAR03': { 'x': 1383, 'y': 760, 'w': 88, 'h': 55 },
'CAR02': { 'x': 1383, 'y': 825, 'w': 80, 'h': 59 },
'CAR04': { 'x': 1383, 'y': 894, 'w': 80, 'h': 57 },
'CAR01': { 'x': 1205, 'y': 1018, 'w': 80, 'h': 56 },
'PLAYER_UPHILL_LEFT': { 'x': 1383, 'y': 961, 'w': 80, 'h': 45 },
'PLAYER_UPHILL_STRAIGHT': { 'x': 1295, 'y': 1018, 'w': 80, 'h': 45 },
'PLAYER_UPHILL_RIGHT': { 'x': 1385, 'y': 1018, 'w': 80, 'h': 45 },
'PLAYER_LEFT': { 'x': 995, 'y': 480, 'w': 80, 'h': 41 },
'PLAYER_STRAIGHT': { 'x': 1085, 'y': 480, 'w': 80, 'h': 41 },
'PLAYER_RIGHT': { 'x': 995, 'y': 531, 'w': 80, 'h': 41 }
}
FP_COLOR_WHITE = '#FFFFFF'
FP_COLOR_BLACK = '#000000'
FP_COLOR_YELLOW = '#EEEE00'
FP_COLOR_BLUE = '#00EEEE'
FP_COLORS = {
'SKY': '#72D7EE',
'TREE': '#005108',
'FOG': '#005108',
'LIGHT': {'road': '#6B6B6B', 'grass': '#10AA10', 'rumble': '#555555', 'lane': '#CCCCCC'},
'DARK': {'road': '#696969', 'grass': '#009A00', 'rumble': '#BBBBBB' },
'START': {'road': FP_COLOR_WHITE, 'grass': FP_COLOR_WHITE, 'rumble': FP_COLOR_WHITE},
'FINISH': {'road': FP_COLOR_BLACK, 'grass': FP_COLOR_BLACK, 'rumble': FP_COLOR_BLACK},
'START_Y': {'road': FP_COLOR_YELLOW, 'grass': '#10AA10', 'rumble': '#555555', 'lane': '#CCCCCC'},
}
FP_ROAD = {
'LENGTH': {'NONE': 0, 'SHORT': 25, 'MEDIUM': 50, 'LONG': 100 }, # num segments
'CURVE': {'NONE': 0, 'EASY': 2, 'MEDIUM': 4, 'HARD': 6 },
'HILL': {'NONE': 0, 'LOW': 20, 'MEDIUM': 40, 'HIGH': 60 },
}
FP_ROAD_SPRTS = {
'chest': {'imgs': ['img_sprts/i_chest1.png'], 'score': 100,},
'coin1': {'imgs': ['img_sprts/i_coin1.png'], 'score': 1,},
'coin5': {'imgs': ['img_sprts/i_coin5.png'], 'score': 5,},
'coin20': {'imgs': ['img_sprts/i_coin20.png'], 'score': 20,},
'health': {'imgs': ['img_sprts/i_health.png'], 'score': 10,},
'heart': {'imgs': ['img_sprts/i_heart.png'], 'score': 50,},
'pot1': {'imgs': ['img_sprts/i_pot1.png'], 'score': -5,},
'pot2': {'imgs': ['img_sprts/i_pot2.png'], 'score': -1,},
'shell': {'imgs': ['img_sprts/p_shell.png'], 'score': -20,},
'rockd': {'imgs': ['img_sprts/rock_d2.png'], 'score': -10,},
'rockr': {'imgs': ['img_sprts/rock_r2.png'], 'score': -50,},
#'ashra_defeat': {'imgs': ['img_sprts/ashra_defeat1.png'], 'score': -100,},
#'bear': {'imgs': ['img_sprts/bear2.png'], 'score': -80,},
#'dinof': {'imgs': ['img_sprts/dinof2.png'], 'score': -50,},
'blobb': {'imgs': ['img_sprts/blobb1.png'], 'score': -50,},
'chick_fly': {'imgs': ['img_sprts/chick_fly3.png'], 'score': 70,},
'clown': {'imgs': ['img_sprts/clown1.png'], 'score': -100,},
}
class SptTmpx(sptdraw.SptDrawBase):
def __init__(self, size, *args, **kwargs):
super(SptTmpx, self).__init__(size)
self.draw_on()
def draw_on(self, *args, **kwargs):
self.fill(consts.GREEN)
self.pygm.draw.circle(self.surf, consts.WHITE,
(self.size[0] / 2, self.size[1] / 2),
self.size[0] / 2, 0)
class SptTmpi(pygm.SptImg):
def __init__(self, img_file, *args, **kwargs):
super(SptTmpi, self).__init__(img_file)
class FPSptBg(pygm.SptImgOne):
def __init__(self, img_file, pos, *args, **kwargs):
super(FPSptBg, self).__init__(img_file, pos)
class FPSptSprts(pygm.SptImgOne):
def __init__(self, img_file, pos, *args, **kwargs):
super(FPSptSprts, self).__init__(img_file, pos)
class FPSptFog(sptdraw.SptDrawBase):
def __init__(self, size, c=[0, 81, 8, 0], h=30, *args, **kwargs):
super(FPSptFog, self).__init__(size)
self.c = c
self.h = h
self.draw_on()
def draw_on(self, *args, **kwargs):
#self.fill(self.c)
d = 2
n = self.h / d
for i in range(n):
rct = [0, i * d, self.size[0], d]
#ca = 255 / n * (n - i)
ca = 200 / n * (n - i)
self.c[3] = ca
self.pygm.draw.rect(self.surf, self.c, rct)
class FPSptRdSprts(pygm.SptImg):
def __init__(self, img_file, *args, **kwargs):
super(FPSptRdSprts, self).__init__(img_file)
@classmethod
def create_by_img(cls, img):
return cls(img)
# for test
#o = SptTmpx((40, 40))
#return o
class FPSptRoadB(sptdraw.SptDrawBase):
def __init__(self, size, cfg, *args, **kwargs):
super(FPSptRoadB, self).__init__(size)
self.cfg = cfg
self.car = kwargs.get('car')
self.bg_sky = kwargs.get('bg_sky')
self.bg_hills = kwargs.get('bg_hills')
self.bg_trees = kwargs.get('bg_trees')
self.clr_dark_road = utils.clr_from_str(FP_COLORS['DARK']['road'])
self.clr_dark_grass = utils.clr_from_str(FP_COLORS['DARK']['grass'])
self.rd_reset(init=True)
self.add_fog()
def prms_reset(self, keep_segs=False):
self.e_keys_up = []
self.e_keys_dn = []
self.camera_x = 0.0
self.camera_y = 0.0
self.camera_z = 500.0#1000.0#0.0 == self.camera_h
self.xw = 0.0
self.yw = 0.0
self.zw = 0.0
self.xc = 0.0
self.yc = 0.0
self.zc = 0.0 ##
self.xp = 0.0
self.yp = 0.0
self.xs = 0.0
self.ys = 0.0
self.d = 200.0#100.0#10.0#30.0#1.0
self.w = self.size[0]
self.h = self.size[1]
if not keep_segs:
self.segments = []
self.rd_sprt_objs = {}
self.rd_sprt_cache = [] # for sprites render order
self.track_len = 0.0
self.seg_len = 200.0#100.0#20.0#60.0#200.0#
self.road_w = 2400#2000#600.0#200.0#1000.0#200#
self.camera_h = 500.0#1000.0#
self.speed_max = 300.0#180.0#200.0#100.0
self.lane_w = 60
self.seg_n = 300#200
#self.seg_draw_n = 200#150
self.seg_draw_n = 70#100#200#150
self.speed = 0.0
self.position = 0.0
self.player_x = 0.0#100.0#1000.0#
self.centrifugal = 0.1#0.06#0.08#0.01#0.3
self.player_seg = None
self.base_seg = None # the segment just under the car
self.player_di = 0 # 0:^ 1:> 2:v 3:<
self.player_go = 0 # 0:- 1:^ 2:v
self.speed_dt_up = 1.0#2.0#3.0
self.speed_dt_dn = 2.0#4.0#6.0
self.speed_dt_na = 1.0#3.0
self.player_x_dt = 60.0#30.0#20.0
self.last_seg_i = 0
self.score = 0
self.game_over = False
self.game_score = 0.0
self.tm_start = 0.0
self.tm_end = 0.0
self.tm_last_once = 0.0
self.sky_speed = 0.1#0.05#
self.hill_speed = 0.2#0.1#
self.tree_speed = 0.3#0.15#
def rd_reset(self, init=False, keep_segs=False, segs_file=None):
#if not init and not keep_segs:
if not init:
self.rd_sprts_del_all_objs()
self.prms_reset(keep_segs=keep_segs)
if segs_file is not None:
try:
segs = self.rd_seg_json_load(segs_file)
self.segments = segs
self.track_len = len(self.segments) * self.seg_len
except Exception as e:
print e
self.init_rd_segs_rand_1()
else:
if not keep_segs:
self.init_rd_segs_rand_1()
self.draw_on()
self.rd_seg_render()
def init_rd_segs_rand_1(self):
#self.rd_seg_init(self.seg_n)
#self.rd_seg_init(self.seg_draw_n)
#self.rd_seg_init(100)#20#500#2#10#4#1#100#200
#self.rd_seg_init(random.randint(30, 100))
self.rd_seg_init(random.randint(1, 10)) # for a3c train
self.rd_seg_init_rand_curve()
#self.add_curves()
#self.add_low_rolling_hills(20, 2.0)
##self.add_low_rolling_hills(30, 4.0)
#self.rd_seg_init_rand(10)#50#10#3#1
#segnrand = random.randint(3, 30)
segnrand = random.randint(2, 6) # for a3c train
self.rd_seg_init_rand(segnrand)
# for segment draw
#self.rd_seg_init(self.seg_draw_n)
#self.rd_seg_init(100)#20#500#2#10#4#1#100#200
self.rd_seg_init(10) # for a3c train
self.rd_start_seg_init()
self.rd_sprts_init_rand()
def draw_on(self, *args, **kwargs):
self.fill(self.clr_dark_grass)
def add_fog(self):
self.fog = FPSptFog(self.size)
self.fog.rect.top = 240
self.fog.rect.left = 0
self.disp_add(self.fog)
def get_seg_base_i(self, pos=None):
if pos is None:
pos = self.position
i = int(pos / self.seg_len)
#x#i = int(utils.math_round(pos / self.seg_len))
#i = int(math.floor(pos / self.seg_len))
#i = int(math.ceil(pos / self.seg_len))
seg_n = len(self.segments)
i = (i + seg_n) % seg_n
return i
def rd_get_segs(self, whole=False):
if whole:
segs = self.segments
else:
segs = self.segments[:-self.seg_draw_n]
return segs
# #### geometry #### #
def geo_prjc_scale(self, d, zc):
if zc == 0.0:
return 1.0
else:
return d / zc
def xc_to_xp(self, xc, d, zc):
if zc == 0.0:
#xp = float('inf')
#xp = 2 ** 64
xp = xc
else:
xp = xc * (d / zc)
return xp
def yc_to_yp(self, yc, d, zc):
if zc == 0.0:
#yp = float('inf')
#yp = 2 ** 64
yp = yc
else:
yp = yc * (d / zc)
return yp
def xp_to_xs(self, xp, w):
#xs = w / 2.0 + w / 2.0 * xp
xs = w / 2.0 + xp
return xs
def yp_to_ys(self, yp, h):
#ys = h / 2.0 - h / 2.0 * yp
ys = h / 2.0 - yp
return ys
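    # Worked example of the projection pipeline above (illustrative numbers):
    # with d = 200 and a camera-space point at xc = 1200, zc = 400, the scale
    # is 200 / 400 = 0.5, so xp = 600; on a w = 640 screen that projects to
    # xs = 640 / 2 + 600 = 920, i.e. off-screen to the right.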
def rd_seg_init(self, a=500):
for n in range(a):
self.rd_seg_add(0.0, 0.0)
def rd_seg_add(self, curve=0.0, yw=0.0):
#print '+', curve, yw
n = len(self.segments)
#print n
if n % 2 == 0:
#if n % 4 == 0:
c = FP_COLORS['LIGHT']
#c = {'road': FP_COLOR_WHITE}
else:
c = FP_COLORS['DARK']
#c = {'road': FP_COLOR_BLACK}
seg = {
'index': n,
'p1': {'world': {'z': (n + 1) * self.seg_len,
'y': self.seg_lasy_y()},
'camera': {},
'screen': {}},
'p2': {'world': {'z': (n + 2) * self.seg_len,
'y': yw},
'camera': {},
'screen': {}},
'curve': curve,
'color': c,
'sprites': [],
'looped': 0,
}
self.segments.append(seg)
self.track_len = len(self.segments) * self.seg_len
#self.track_len = (len(self.segments) - self.seg_draw_n) * self.seg_len
def seg_lasy_y(self):
seg_n = len(self.segments)
if seg_n == 0:
return 0.0
else:
return self.segments[seg_n - 1]['p2']['world'].get('y', 0.0)
def rd_seg_init_rand(self, n=50):
#print 'rd_seg_init_rand', n
for i in range(n):
p = random.random()
#print p
rl = random.choice([1, -1])
enter = random.randint(10, 40)
hold = random.randint(10, 40)
leave = random.randint(10, 40)
if p < 0.3:
curve = 0.0
yw = 0.0
#elif p < 0.8:
# curve = 0.0
# yw = random.random() * 10.0
else:
curve = rl * random.random() * 6.0
yw = 0.0
self.add_road(enter, hold, leave, curve, yw)
def rd_seg_init_rand_2(self, n=50):
for i in range(n):
p = random.random()
#print p
rl = random.choice([1, -1])
if p < 0.35:
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
rl * FP_ROAD['CURVE']['MEDIUM'])
elif p < 0.7:
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
rl * FP_ROAD['CURVE']['EASY'])
else:
enter = random.randint(10, 100)
hold = random.randint(10, 100)
leave = random.randint(10, 100)
self.add_road(enter, hold, leave, 0.0, 0.0)
def rd_seg_init_rand_curve(self, n=5):
#print 'rd_seg_init_rand', n
for i in range(n):
rl = random.choice([1, -1])
enter = random.randint(10, 40)
hold = random.randint(10, 40)
leave = random.randint(10, 40)
curve = rl * random.random() * 8.0
yw = 0.0
self.add_road(enter, hold, leave, curve, yw)
def rd_start_seg_init(self, n=3):
seg_n = len(self.segments)
if seg_n == 0:
return
#self.segments[0]['color'] = FP_COLORS['START_Y']
#self.segments[2]['color'] = FP_COLORS['START_Y']
for i in range(n):
self.segments[i]['color'] = FP_COLORS['START_Y']
def rd_sprts_init_rand(self, n=None):
seg_n = len(self.segments)
if n is None:
#n = seg_n / 20
n = seg_n / random.randint(10, 30)
for i in range(n):
j = random.randint(10, seg_n - 10)
sprt = random.choice(FP_ROAD_SPRTS.keys())
s = {
'name': sprt,
'type': 1, # image / animate / ...
'obj': None, # need to create at render
##'x_i': None, # get real (random) x from x_pos
'x_i': random.randint(0, 4),
'score': FP_ROAD_SPRTS[sprt].get('score', 0),
}
self.segments[j]['sprites'].append(s)
def rd_sprts_del_all_objs(self):
for k, sprt in self.rd_sprt_objs.items():
#print k, sprt
self.disp_del(sprt)
del self.rd_sprt_objs[k]
def util_limit(self, value, mn, mx):
return max(mn, min(value, mx))
def util_accelerate(self, v, accel, dt):
return v + (accel * dt)
def util_increase(self, start, increment, mx): # with looping
result = start + increment
while (result >= mx):
result -= mx
while (result < 0):
result += mx
return result
def util_ease_in(self, a, b, percent):
return a + (b - a) * math.pow(percent, 2)
def util_ease_out(self, a, b, percent):
return a + (b - a) * (1 - math.pow(1 - percent, 2))
def util_ease_in_out(self, a, b, percent):
return a + (b - a) * ((-math.cos(percent * math.pi)/2) + 0.5)
def util_curve_percent_remaining(self, n, total):
return (n % total) / total
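    # Worked easing example (illustrative numbers): util_ease_in(0, 6, 0.5)
    # returns 0 + 6 * 0.5 ** 2 = 1.5 and util_ease_out(0, 6, 0.5) returns
    # 0 + 6 * (1 - (1 - 0.5) ** 2) = 4.5; add_road below ramps curve and hill
    # values through these over the enter/hold/leave segment counts.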
def add_road(self, enter, hold, leave, curve, yw=0.0):
#print enter, hold, leave, curve, yw
start_y = self.seg_lasy_y()
end_y = start_y + (int(yw) * self.seg_len)
total = enter + hold + leave
for n in range(enter):
self.rd_seg_add(self.util_ease_in(0, curve, float(n)/enter),
self.util_ease_out(start_y, end_y,
float(n)/total))
for n in range(hold):
self.rd_seg_add(curve,
self.util_ease_out(start_y, end_y,
(float(n)+enter)/total))
for n in range(leave):
self.rd_seg_add(self.util_ease_out(curve, 0, n/leave),
self.util_ease_out(start_y, end_y,
(float(n)+enter+hold)/total))
def add_curves(self):
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
-FP_ROAD['CURVE']['EASY'])
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['CURVE']['MEDIUM'])
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['CURVE']['EASY'])
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
-FP_ROAD['CURVE']['EASY'])
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
-FP_ROAD['CURVE']['MEDIUM'])
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
0.0)
def add_low_rolling_hills(self, num, height):
        num = num or FP_ROAD['LENGTH']['SHORT']
        height = height or FP_ROAD['HILL']['LOW']
self.add_road(num, num, num, 0, height/2.0)
self.add_road(num, num, num, 0, -height)
self.add_road(num, num, num, 0, height)
self.add_road(num, num, num, 0, 0)
self.add_road(num, num, num, 0, height/2.0)
self.add_road(num, num, num, 0, 0)
def rd_seg_get_cleared(self, segs=None):
if not segs:
segs = self.segments
segs_c = []
for seg in segs:
if not seg['sprites']:
segs_c.append(seg)
else:
seg_c = {}
for k, v in seg.items():
if k not in ['sprites']:
seg_c[k] = v
else:
seg_c[k] = []
for spr in seg['sprites']:
spr_n = {}
for sk, sv in spr.items():
if sk not in ['obj']:
spr_n[sk] = sv
else:
spr_n[sk] = None
seg_c[k].append(spr_n)
segs_c.append(seg_c)
return segs_c
def rd_seg_json_save(self, f):
sc = self.rd_seg_get_cleared(self.segments)
s = utils.json_dumps(sc)
with open(f, 'w') as fo:
fo.write(s)
def rd_seg_json_load(self, f):
with open(f, 'r') as fi:
s = fi.read()
segs = utils.json_loads(s)
return segs
def rd_seg_render__1_o(self):
"""straight"""
xc1 = self.road_w / 2 - self.player_x
xc2 = -self.road_w / 2 - self.player_x
xc3 = self.road_w / 2 - self.player_x
xc4 = -self.road_w / 2 - self.player_x
xcl1 = xc1 - self.lane_w
xcl2 = xc2 + self.lane_w
xcl3 = xc3 - self.lane_w
xcl4 = xc4 + self.lane_w
xcr1 = self.lane_w - self.player_x
xcr2 = -self.lane_w - self.player_x
xcr3 = self.lane_w - self.player_x
xcr4 = -self.lane_w - self.player_x
yc = self.camera_h
#print '=' * 80
#print 'self.position', self.position
for i, seg in enumerate(self.segments):
zw1 = seg['p1']['world']['z']
zw2 = seg['p2']['world']['z']
zc1 = zw1 - self.camera_z - self.position
zc2 = zw2 - self.camera_z - self.position
#zc1 = self.position - (zw1 - self.camera_z)
#zc2 = self.position - (zw2 - self.camera_z)
xp1 = self.xc_to_xp(xc1, self.d, zc1)
xs1 = self.xp_to_xs(xp1, self.w)
xp2 = self.xc_to_xp(xc2, self.d, zc1)
xs2 = self.xp_to_xs(xp2, self.w)
xp3 = self.xc_to_xp(xc3, self.d, zc2)
xs3 = self.xp_to_xs(xp3, self.w)
xp4 = self.xc_to_xp(xc4, self.d, zc2)
xs4 = self.xp_to_xs(xp4, self.w)
yp1 = self.yc_to_yp(yc, self.d, zc1)
ys1 = self.yp_to_ys(yp1, self.h)
ys2 = ys1
yp3 = self.yc_to_yp(yc, self.d, zc2)
ys3 = self.yp_to_ys(yp3, self.h)
ys4 = ys3
self.render_polygon(None,
0, ys1, self.w, ys2,
self.w, ys4, 0, ys3,
seg['color']['grass'])
self.render_polygon(None,
xs1, ys1, xs2, ys2,
xs4, ys4, xs3, ys3,
seg['color']['road'])
if 1:#i % 2 == 1:
xpl1 = self.xc_to_xp(xcl1, self.d, zc1)
xsl1 = self.xp_to_xs(xpl1, self.w)
xpl2 = self.xc_to_xp(xcl2, self.d, zc1)
xsl2 = self.xp_to_xs(xpl2, self.w)
xpl3 = self.xc_to_xp(xcl3, self.d, zc2)
xsl3 = self.xp_to_xs(xpl3, self.w)
xpl4 = self.xc_to_xp(xcl4, self.d, zc2)
xsl4 = self.xp_to_xs(xpl4, self.w)
self.render_polygon(None,
xs1, ys1, xsl1, ys1,
xsl3, ys3, xs3, ys3,
seg['color']['rumble'])
self.render_polygon(None,
xs2, ys2, xsl2, ys2,
xsl4, ys4, xs4, ys4,
seg['color']['rumble'])
xpr1 = self.xc_to_xp(xcr1, self.d, zc1)
xsr1 = self.xp_to_xs(xpr1, self.w)
xpr2 = self.xc_to_xp(xcr2, self.d, zc1)
xsr2 = self.xp_to_xs(xpr2, self.w)
xpr3 = self.xc_to_xp(xcr3, self.d, zc2)
xsr3 = self.xp_to_xs(xpr3, self.w)
xpr4 = self.xc_to_xp(xcr4, self.d, zc2)
xsr4 = self.xp_to_xs(xpr4, self.w)
self.render_polygon(None,
xsr1, ys1, xsr2, ys2,
xsr4, ys4, xsr3, ys3,
seg['color']['rumble'])
def rd_seg_render__2_o(self):
"""curve test 1"""
#theta_i = math.pi /180.0 * 0.1
#theta_i = math.pi /180.0 * 0.5
theta_i = math.pi /180.0 * 0.9
#theta_i = 0.0
xc1 = self.road_w / 2 - self.player_x
xc2 = -self.road_w / 2 - self.player_x
xc3 = self.road_w / 2 - self.player_x
xc4 = -self.road_w / 2 - self.player_x
yc = self.camera_h
print '=' * 80
print 'self.position', self.position
# <2>
seg_n = len(self.segments)
segbi = self.get_seg_base_i()
print 'segbi', segbi
# TODO: do at update
#dpx1 = self.seg_len * math.tan(theta_i)
#self.player_x -= dpx1
# <1>
#for i, seg in enumerate(self.segments):
# <2>
for i in range(self.seg_draw_n):
#'''
# <2>
si = (segbi + i) % seg_n
#print si
seg = self.segments[si]
#x#zw1 = (i+1)*self.seg_len
#zw2 = (i+2)*self.seg_len
#'''
# <1>
zw1 = seg['p1']['world']['z']
zw2 = seg['p2']['world']['z']
zc1 = zw1 - self.camera_z - self.position
zc2 = zw2 - self.camera_z - self.position
curve_d = 500
#x#xc1 = self.road_w / 2 - self.player_x - curve_d * i
#xc2 = -self.road_w / 2 - self.player_x - curve_d * i
#xc3 = self.road_w / 2 - self.player_x - curve_d * i
#xc4 = -self.road_w / 2 - self.player_x - curve_d * i
xp1 = self.xc_to_xp(xc1, self.d, zc1)
xs1 = self.xp_to_xs(xp1, self.w)
xp2 = self.xc_to_xp(xc2, self.d, zc1)
xs2 = self.xp_to_xs(xp2, self.w)
xp3 = self.xc_to_xp(xc3, self.d, zc2)
xs3 = self.xp_to_xs(xp3, self.w)
xp4 = self.xc_to_xp(xc4, self.d, zc2)
xs4 = self.xp_to_xs(xp4, self.w)
yp1 = self.yc_to_yp(yc, self.d, zc1)
ys1 = self.yp_to_ys(yp1, self.h)
ys2 = ys1
yp3 = self.yc_to_yp(yc, self.d, zc2)
ys3 = self.yp_to_ys(yp3, self.h)
ys4 = ys3
#'''
#if 1:
#if i < self.seg_draw_n / 2:
if i < self.seg_draw_n / 4:
theta1 = theta_i * i
theta2 = theta_i * (i + 1)
dx1 = self.seg_len * math.tan(theta1)
dx2 = self.seg_len * math.tan(theta2)
xs1 += dx1
xs2 += dx1
xs3 += dx2 #+ dx1
xs4 += dx2 #+ dx1
#'''
self.render_polygon(None,
0, ys1, self.w, ys2,
self.w, ys4, 0, ys3,
seg['color']['grass'])
self.render_polygon(None,
xs1, ys1, xs2, ys2,
xs4, ys4, xs3, ys3,
seg['color']['road'])
def rd_seg_render__3_o(self):
"""curve test 2: draw a circle"""
#theta_i = math.pi /180.0 * 0.1
#theta_i = math.pi /180.0 * 0.5
theta_i = math.pi /180.0 * 0.9
#theta_i = 0.0
#xc1 = self.road_w / 2 - self.player_x
#xc2 = -self.road_w / 2 - self.player_x
#xc3 = self.road_w / 2 - self.player_x
#xc4 = -self.road_w / 2 - self.player_x
# <3>
#engi = math.pi / 2.0 / self.seg_draw_n
engi = math.pi / 2.0 / 60#10#20
rad = self.road_w * 4#2
rad1 = rad + self.road_w / 2
rad2 = rad - self.road_w / 2
yc = self.camera_h
print '=' * 80
print 'self.position', self.position
# <2>
seg_n = len(self.segments)
segbi = self.get_seg_base_i()
print 'segbi', segbi
# TODO: do at update
#dpx1 = self.seg_len * math.tan(theta_i)
#self.player_x -= dpx1
# <1>
#for i, seg in enumerate(self.segments):
# <2>
for i in range(self.seg_draw_n):
#'''
# <2>
si = (segbi + i) % seg_n
#print si
seg = self.segments[si]
#x#zw1 = (i+1)*self.seg_len
#zw2 = (i+2)*self.seg_len
#'''
# <1>
zw1 = seg['p1']['world']['z']
zw2 = seg['p2']['world']['z']
zc1 = zw1 - self.camera_z - self.position
zc2 = zw2 - self.camera_z - self.position
curve_d = 500
#x#xc1 = self.road_w / 2 - self.player_x - curve_d * i
#xc2 = -self.road_w / 2 - self.player_x - curve_d * i
#xc3 = self.road_w / 2 - self.player_x - curve_d * i
#xc4 = -self.road_w / 2 - self.player_x - curve_d * i
# <3>
xx1 = rad1 * math.cos(engi * i)
xx2 = rad2 * math.cos(engi * i)
xx3 = rad1 * math.cos(engi * (i + 1))
xx4 = rad2 * math.cos(engi * (i + 1))
xc1 = (rad - xx1) - self.player_x
xc2 = (rad - xx2) - self.player_x
xc3 = (rad - xx3) - self.player_x
xc4 = (rad - xx4) - self.player_x
xp1 = self.xc_to_xp(xc1, self.d, zc1)
xs1 = self.xp_to_xs(xp1, self.w)
xp2 = self.xc_to_xp(xc2, self.d, zc1)
xs2 = self.xp_to_xs(xp2, self.w)
xp3 = self.xc_to_xp(xc3, self.d, zc2)
xs3 = self.xp_to_xs(xp3, self.w)
xp4 = self.xc_to_xp(xc4, self.d, zc2)
xs4 = self.xp_to_xs(xp4, self.w)
yp1 = self.yc_to_yp(yc, self.d, zc1)
ys1 = self.yp_to_ys(yp1, self.h)
ys2 = ys1
yp3 = self.yc_to_yp(yc, self.d, zc2)
ys3 = self.yp_to_ys(yp3, self.h)
ys4 = ys3
'''
#if 1:
#if i < self.seg_draw_n / 2:
if i < self.seg_draw_n / 4:
theta1 = theta_i * i
theta2 = theta_i * (i + 1)
dx1 = self.seg_len * math.tan(theta1)
dx2 = self.seg_len * math.tan(theta2)
xs1 += dx1
xs2 += dx1
xs3 += dx2 #+ dx1
xs4 += dx2 #+ dx1
'''
self.render_polygon(None,
0, ys1, self.w, ys2,
self.w, ys4, 0, ys3,
seg['color']['grass'])
self.render_polygon(None,
xs1, ys1, xs2, ys2,
xs4, ys4, xs3, ys3,
seg['color']['road'])
def rd_seg_render__4_o(self):
"""curve"""
#theta_i = math.pi /180.0 * 0.1
#theta_i = math.pi /180.0 * 0.5
theta_i = math.pi /180.0 * 0.9
#theta_i = 0.0
xc1 = self.road_w / 2 - self.player_x
xc2 = -self.road_w / 2 - self.player_x
xc3 = self.road_w / 2 - self.player_x
xc4 = -self.road_w / 2 - self.player_x
#xcl1 = xc1 - self.lane_w
#xcl2 = xc2 + self.lane_w
#xcl3 = xc3 - self.lane_w
#xcl4 = xc4 + self.lane_w
xcr1 = self.lane_w - self.player_x
xcr2 = -self.lane_w - self.player_x
xcr3 = self.lane_w - self.player_x
xcr4 = -self.lane_w - self.player_x
yc = self.camera_h
print '=' * 80
print 'self.position', self.position
# <2>
seg_n = len(self.segments)
segbi = self.get_seg_base_i()
print 'segbi', segbi
self.player_seg = self.segments[segbi]
b_curve = self.player_seg.get('curve', 0.0)
#b_percent = 0.5
b_percent = self.util_curve_percent_remaining(self.position,
self.seg_len)
dx_curve = - (b_curve * b_percent)
x_curve = 0
# <1>
#for i, seg in enumerate(self.segments):
# <2>
for i in range(self.seg_draw_n):
#'''
# <2>
si = (segbi + i) % seg_n
#print si
seg = self.segments[si]
#'''
'''
#x#
if seg['index'] < segbi:
zw1 = (i+1)*self.seg_len
zw2 = (i+2)*self.seg_len
else:
# <1>
zw1 = seg['p1']['world']['z']
zw2 = seg['p2']['world']['z']
'''
zw1 = seg['p1']['world']['z']
zw2 = seg['p2']['world']['z']
zc1 = zw1 - self.camera_z - self.position
zc2 = zw2 - self.camera_z - self.position
# for curve
xc1 = xc1 - x_curve
xc2 = xc2 - x_curve
xc3 = xc3 - x_curve - dx_curve
xc4 = xc4 - x_curve - dx_curve
xcl1 = xc1 - self.lane_w
xcl2 = xc2 + self.lane_w
xcl3 = xc3 - self.lane_w
xcl4 = xc4 + self.lane_w
xcr1 = xcr1 - x_curve
xcr2 = xcr2 - x_curve
xcr3 = xcr3 - x_curve - dx_curve
xcr4 = xcr4 - x_curve - dx_curve
x_curve = x_curve + dx_curve
dx_curve = dx_curve + seg.get('curve', 0.0)
xp1 = self.xc_to_xp(xc1, self.d, zc1)
xs1 = self.xp_to_xs(xp1, self.w)
xp2 = self.xc_to_xp(xc2, self.d, zc1)
xs2 = self.xp_to_xs(xp2, self.w)
xp3 = self.xc_to_xp(xc3, self.d, zc2)
xs3 = self.xp_to_xs(xp3, self.w)
xp4 = self.xc_to_xp(xc4, self.d, zc2)
xs4 = self.xp_to_xs(xp4, self.w)
yp1 = self.yc_to_yp(yc, self.d, zc1)
ys1 = self.yp_to_ys(yp1, self.h)
ys2 = ys1
yp3 = self.yc_to_yp(yc, self.d, zc2)
ys3 = self.yp_to_ys(yp3, self.h)
ys4 = ys3
'''
#if 1:
#if i < self.seg_draw_n / 2:
if i < self.seg_draw_n / 4:
theta1 = theta_i * i
theta2 = theta_i * (i + 1)
dx1 = self.seg_len * math.tan(theta1)
dx2 = self.seg_len * math.tan(theta2)
xs1 += dx1
xs2 += dx1
xs3 += dx2 #+ dx1
xs4 += dx2 #+ dx1
'''
self.render_polygon(None,
0, ys1, self.w, ys2,
self.w, ys4, 0, ys3,
seg['color']['grass'])
self.render_polygon(None,
xs1, ys1, xs2, ys2,
xs4, ys4, xs3, ys3,
seg['color']['road'])
if 1:#i % 2 == 1:
xpl1 = self.xc_to_xp(xcl1, self.d, zc1)
xsl1 = self.xp_to_xs(xpl1, self.w)
xpl2 = self.xc_to_xp(xcl2, self.d, zc1)
xsl2 = self.xp_to_xs(xpl2, self.w)
xpl3 = self.xc_to_xp(xcl3, self.d, zc2)
xsl3 = self.xp_to_xs(xpl3, self.w)
xpl4 = self.xc_to_xp(xcl4, self.d, zc2)
xsl4 = self.xp_to_xs(xpl4, self.w)
self.render_polygon(None,
xs1, ys1, xsl1, ys1,
xsl3, ys3, xs3, ys3,
seg['color']['rumble'])
self.render_polygon(None,
xs2, ys2, xsl2, ys2,
xsl4, ys4, xs4, ys4,
seg['color']['rumble'])
xpr1 = self.xc_to_xp(xcr1, self.d, zc1)
xsr1 = self.xp_to_xs(xpr1, self.w)
xpr2 = self.xc_to_xp(xcr2, self.d, zc1)
xsr2 = self.xp_to_xs(xpr2, self.w)
xpr3 = self.xc_to_xp(xcr3, self.d, zc2)
xsr3 = self.xp_to_xs(xpr3, self.w)
xpr4 = self.xc_to_xp(xcr4, self.d, zc2)
xsr4 = self.xp_to_xs(xpr4, self.w)
self.render_polygon(None,
xsr1, ys1, xsr2, ys2,
xsr4, ys4, xsr3, ys3,
seg['color']['rumble'])
def rd_seg_render(self):
"""curve"""
#theta_i = math.pi /180.0 * 0.1
#theta_i = math.pi /180.0 * 0.5
theta_i = math.pi /180.0 * 0.9
#theta_i = 0.0
xc1 = self.road_w / 2 - self.player_x
xc2 = -self.road_w / 2 - self.player_x
xc3 = self.road_w / 2 - self.player_x
xc4 = -self.road_w / 2 - self.player_x
#xcl1 = xc1 - self.lane_w
#xcl2 = xc2 + self.lane_w
#xcl3 = xc3 - self.lane_w
#xcl4 = xc4 + self.lane_w
xcr1 = self.lane_w - self.player_x
xcr2 = -self.lane_w - self.player_x
xcr3 = self.lane_w - self.player_x
xcr4 = -self.lane_w - self.player_x
yc = self.camera_h
#print '=' * 80
#print 'self.position', self.position
# <2>
seg_n = len(self.segments)
segbi = self.get_seg_base_i()
#print 'segbi', segbi, ' / ', seg_n
self.player_seg = self.segments[segbi]
self.base_seg = self.segments[(segbi + 2) % seg_n]
# for test
#self.base_seg['color'] = FP_COLORS['FINISH']
b_curve = self.player_seg.get('curve', 0.0)
#b_percent = 0.5
b_percent = self.util_curve_percent_remaining(self.position,
self.seg_len)
dx_curve = - (b_curve * b_percent)
x_curve = 0
#print 'b_curve', b_curve
#print 'world z', self.player_seg['p1']['world']['z']
#print 'world y', self.player_seg['p1']['world'].get('y', 0.0)
# clear the sprites cache
self.rd_sprt_cache = []
# <1>
#for i, seg in enumerate(self.segments):
# <2>
for i in range(self.seg_draw_n):
#'''
# <2>
si = (segbi + i) % seg_n
#print si
seg = self.segments[si]
#'''
'''
# for test
if i < 10:
print '>>> ', i
print 'curve', seg.get('curve', 0.0)
print 'world z', seg['p1']['world']['z']
print 'world y', seg['p1']['world'].get('y', 0.0)
#print '-' * 30
'''
'''
#x#
if seg['index'] < segbi:
zw1 = (i+1)*self.seg_len
zw2 = (i+2)*self.seg_len
else:
# <1>
zw1 = seg['p1']['world']['z']
zw2 = seg['p2']['world']['z']
'''
zw1 = (i+1)*self.seg_len
zw2 = (i+2)*self.seg_len
zc1 = zw1 - self.camera_z - (self.position % self.seg_len)
zc2 = zw2 - self.camera_z - (self.position % self.seg_len)
'''
#x#
zw1 = seg['p1']['world']['z']
zw2 = seg['p2']['world']['z']
zc1 = zw1 - self.camera_z - self.position
zc2 = zw2 - self.camera_z - self.position
'''
# for curve
xc1 = xc1 - x_curve
xc2 = xc2 - x_curve
xc3 = xc3 - x_curve - dx_curve
xc4 = xc4 - x_curve - dx_curve
xcl1 = xc1 - self.lane_w
xcl2 = xc2 + self.lane_w
xcl3 = xc3 - self.lane_w
xcl4 = xc4 + self.lane_w
xcr1 = xcr1 - x_curve
xcr2 = xcr2 - x_curve
xcr3 = xcr3 - x_curve - dx_curve
xcr4 = xcr4 - x_curve - dx_curve
x_curve = x_curve + dx_curve
dx_curve = dx_curve + seg.get('curve', 0.0)
# for hills
yw1 = seg['p1']['world'].get('y', 0.0)
yw2 = seg['p2']['world'].get('y', 0.0)
yc1 = yc - yw1
yc2 = yc - yw2
#print yw1, yw2
xp1 = self.xc_to_xp(xc1, self.d, zc1)
xs1 = self.xp_to_xs(xp1, self.w)
xp2 = self.xc_to_xp(xc2, self.d, zc1)
xs2 = self.xp_to_xs(xp2, self.w)
xp3 = self.xc_to_xp(xc3, self.d, zc2)
xs3 = self.xp_to_xs(xp3, self.w)
xp4 = self.xc_to_xp(xc4, self.d, zc2)
xs4 = self.xp_to_xs(xp4, self.w)
yp1 = self.yc_to_yp(yc1, self.d, zc1)
ys1 = self.yp_to_ys(yp1, self.h)
ys2 = ys1
yp3 = self.yc_to_yp(yc2, self.d, zc2)
ys3 = self.yp_to_ys(yp3, self.h)
ys4 = ys3
'''
# for test
if i < 10:
print xs1, ys1, xs2, ys2
print xs4, ys4, xs3, ys3
print '-' * 30
'''
# grass
self.render_polygon(None,
0, ys1, self.w, ys2,
self.w, ys4, 0, ys3,
seg['color']['grass'])
# road
self.render_polygon(None,
xs1, ys1, xs2, ys2,
xs4, ys4, xs3, ys3,
seg['color']['road'])
if 1:#i % 2 == 1:
xpl1 = self.xc_to_xp(xcl1, self.d, zc1)
xsl1 = self.xp_to_xs(xpl1, self.w)
xpl2 = self.xc_to_xp(xcl2, self.d, zc1)
xsl2 = self.xp_to_xs(xpl2, self.w)
xpl3 = self.xc_to_xp(xcl3, self.d, zc2)
xsl3 = self.xp_to_xs(xpl3, self.w)
xpl4 = self.xc_to_xp(xcl4, self.d, zc2)
xsl4 = self.xp_to_xs(xpl4, self.w)
self.render_polygon(None,
xs1, ys1, xsl1, ys1,
xsl3, ys3, xs3, ys3,
seg['color']['rumble'])
self.render_polygon(None,
xs2, ys2, xsl2, ys2,
xsl4, ys4, xs4, ys4,
seg['color']['rumble'])
xpr1 = self.xc_to_xp(xcr1, self.d, zc1)
xsr1 = self.xp_to_xs(xpr1, self.w)
xpr2 = self.xc_to_xp(xcr2, self.d, zc1)
xsr2 = self.xp_to_xs(xpr2, self.w)
xpr3 = self.xc_to_xp(xcr3, self.d, zc2)
xsr3 = self.xp_to_xs(xpr3, self.w)
xpr4 = self.xc_to_xp(xcr4, self.d, zc2)
xsr4 = self.xp_to_xs(xpr4, self.w)
self.render_polygon(None,
xsr1, ys1, xsr2, ys2,
xsr4, ys4, xsr3, ys3,
seg['color']['rumble'])
# for test
#self.pygm.draw.circle(self.surf, consts.BLUE,
# (int(xsr1), 116 - int(ys1)),
# 3, 0)
# render road sprites
# TODO: check if this seg is looped
seg_scale = self.geo_prjc_scale(self.d, zc1)
x_rnd = random.randint(1, self.road_w / 2 - 10) * seg_scale
#x_sprt = (xs1 + xs2) / 2.0
#y_sprt = (ys1 + ys3) / 2.0
x_dt = x_rnd * seg_scale
x_pos = [xsr1, xsr2,
(xsr1 + xsl1) / 2.0,
(xsr2 + xsl2) / 2.0,
xsl1, xsl2]
#x_sprt = xsr1
x_sprt = (xsr1 + xsl1) / 2.0
#x_sprt = random.choice(x_pos)
x_i = random.randint(0, len(x_pos) - 1) # NOTE: not used now !!
##x_i = 2
y_sprt = ys1
scale_sprt = seg_scale * 8.0#10.0#2.0
obj = self.rd_sprts_render(seg, x_pos, x_i, y_sprt, scale_sprt)
if obj:
self.rd_sprt_cache.append(obj)
# render the sprites with right order
for obj in self.rd_sprt_cache[::-1]:
self.disp_add(obj)
def render_polygon(self, ctx, x1, y1, x2, y2, x3, y3, x4, y4, color):
#d = 200#100#240#50#
#a = 60
#pnts = [[x1, y1], [x2, y2], [x3, y3], [x4, y4], [x1, y1]]
#pnts = [[x1, y1-d], [x2, y2-d], [x3, y3-d], [x4, y4-d], [x1, y1-d]]
#pnts = [[x1, y1+a], [x2, y2+a], [x3, y3+a], [x4, y4+a], [x1, y1+a]]
# reflect the y-
d = 116
pnts = [[x1, d-y1], [x2, d-y2], [x3, d-y3], [x4, d-y4], [x1, d-y1]]
c = utils.clr_from_str(color)
try:
self.pygm.draw.polygon(self.surf, c, pnts)
except Exception as e:
#print '-' * 60
pass
def rd_sprts_render(self, seg, x_pos, x_i, y, scale):
sprts = seg.get('sprites')
if not sprts:
return None
for i, info in enumerate(sprts):
sprt = info['name']
obj_k = str(seg['index']) + '_' + str(i) + '_' + sprt
obj = info.get('obj')
'''
# TODO: <1>
if not obj:
obj = FPSptRdSprts.create_by_img(FP_ROAD_SPRTS[sprt][0])
info['obj'] = obj
self.disp_add(obj)
'''
# <2>
if obj:
self.disp_del(obj)
# NOTE: objs will be deleted at rd_sprts_del_all_objs()
##del self.rd_sprt_objs[obj_k]
img = FP_ROAD_SPRTS[sprt]['imgs'][0]
obj = FPSptRdSprts.create_by_img(img)
# avoid: pygame.error: Width or height is too large
if scale > 500:
#print 'scale <1>', scale
pass
else:
try:
obj.scale(scale)
except:
#print 'scale <2>', scale
pass
x_i_saved = info.get('x_i')
#if not x_i_saved:
# info['x_i'] = x_i
# x_i_saved = x_i
obj.rect.top = 116 - y + 240 - obj.rect.height
obj.rect.left = x_pos[x_i_saved] - obj.rect.width / 2
#obj.scale(scale)
info['obj'] = obj
##self.disp_add(obj) # NOTE: render out here
self.rd_sprt_objs[obj_k] = obj # for reset to delete all
# NOTE: only show one
break
return obj
def handle_event(self, events, *args, **kwargs):
#print '>>> ', events
if not self.flag_check_event:
return events
else:
return self.check_key(events)
def key_to_di(self, k):
if k == self.pglc.K_UP:
return 0
elif k == self.pglc.K_RIGHT:
return 1
elif k == self.pglc.K_DOWN:
return 2
elif k == self.pglc.K_LEFT:
return 3
else:
return None
def key_to_di_b(self, k):
if k == self.pglc.K_f or k == self.pglc.K_j:
return 0
elif k == self.pglc.K_k:
return 1
elif k == self.pglc.K_SPACE or k == self.pglc.K_v or k == self.pglc.K_n:
return 2
elif k == self.pglc.K_d:
return 3
else:
return None
def check_key(self, events):
#print id(events)
r_events = []
e_keys_up = []
e_keys_dn = []
for event in events:
#print event
if event.type == self.pglc.KEYUP:
di = self.key_to_di(event.key)
if di is None:
di = self.key_to_di_b(event.key)
if di is not None:
e_keys_up.append(di)
else:
r_events.append(event)
elif event.type == self.pglc.KEYDOWN:
di = self.key_to_di(event.key)
if di is None:
di = self.key_to_di_b(event.key)
if di is not None:
e_keys_dn.append(di)
else:
r_events.append(event)
else:
r_events.append(event)
self.e_keys_up = e_keys_up
self.e_keys_dn = e_keys_dn
return r_events
def refresh__1(self, fps_clock, *args, **kwargs):
#print '>>> refresh'
#'''
if self.player_di == 3: # <
self.player_x -= 9
if self.player_x < -1000:
self.player_di = 1
elif self.player_di == 1:
self.player_x += 19
if self.player_x > 1000:
self.player_di = 3
#'''
#'''
self.position += 10.0#5.0#1.0
self.position += random.randint(2, 10)
if self.position > self.track_len:
self.position -= self.track_len
#'''
self.draw_on()
self.rd_seg_render()
def refresh(self, fps_clock, *args, **kwargs):
self.check_player_di(self.e_keys_dn, self.e_keys_up)
self.draw_on()
self.rd_seg_render()
self.update_world()
self.check_if_car_out_road()
self.check_score()
self.check_tm()
self.update_bg()
def check_player_di(self, e_keys_dn, e_keys_up):
if 0 in e_keys_dn:
self.player_go = 1
elif 2 in e_keys_dn:
self.player_go = 2
if 1 in e_keys_dn:
self.player_di = 1
elif 3 in e_keys_dn:
self.player_di = 3
if 0 in e_keys_up:
if self.player_go != 2:
self.player_go = 0
if 2 in e_keys_up:
if self.player_go != 1:
self.player_go = 0
if 1 in e_keys_up:
if self.player_di != 3:
self.player_di = 0
if 3 in e_keys_up:
if self.player_di != 1:
self.player_di = 0
def update_world(self):
if self.player_go == 1:
self.speed += self.speed_dt_up
elif self.player_go == 2:
self.speed -= self.speed_dt_dn
else:
self.speed -= self.speed_dt_na
# if on the grass, slow down
if self.player_x < -self.road_w / 2 or \
self.player_x > self.road_w / 2:
self.speed -= 10
if self.speed < 0.0:
self.speed = 0.0
elif self.speed > self.speed_max:
self.speed = self.speed_max
self.position += self.speed
if self.position > self.track_len:
self.position -= self.track_len
# for check score
self.last_seg_i = 0
self.game_over = True
self.game_score = 1.0
if self.player_di == 1:
#self.player_x += self.player_x_dt
self.player_x += self.speed / 5 + 20
elif self.player_di == 3:
#self.player_x -= self.player_x_dt
self.player_x -= self.speed / 5 + 20
else:
pass
p_curve = self.player_seg.get('curve', 0.0)
#print 'p_curve', p_curve
p_dt = self.speed * p_curve * self.centrifugal
#print p_dt
#self.player_x -= p_dt
self.player_x += p_dt
def check_if_car_out_road(self):
# decrease score when go out the road
if self.player_x < -self.road_w / 2 or \
self.player_x > self.road_w / 2:
if self.score > 0:
self.score -= 1
#self.score -= 1
#if self.score < 0:
# self.score = 0
self.game_over = True
self.game_score = -1.0
def check_score(self):
# make sure we check score once for a segment
seg_i = self.player_seg['index']
if seg_i > self.last_seg_i:
self.last_seg_i = seg_i
else:
return
# NOTE: here we should use the segment just under the car
#sprts = self.player_seg['sprites']
sprts = self.base_seg['sprites']
if not sprts:
return
# NOTE: we now only use the first sprite !
sprt = sprts[0]
x_i = sprt.get('x_i')
if x_i is None:
return
scr = sprt.get('score')
if not scr: # None or 0
return
obj = sprt.get('obj')
if not obj: # None or 0
return
#rd_w_half = self.road_w / 2
#x_pos = [rd_w_half + self.lane_w,
# rd_w_half - self.lane_w]
sprt_x = obj.rect.left
sprt_w = obj.rect.width
car_x = self.player_x
car_w = self.car.rect.width * 2
sprt_at = 10000
if x_i == 0:
sprt_at = 40
elif x_i == 1:
sprt_at = -40
elif x_i == 2:
sprt_at = 580
elif x_i == 3:
sprt_at = -580
elif x_i == 4:
sprt_at = 1100
elif x_i == 5:
sprt_at = -1100
#print 'sprt_x', sprt_x
#print 'car_x', car_x
#print 'car_w', car_w
#print 'sprt_at', (car_x - car_w / 2), sprt_at, (car_x + car_w / 2)
#print '-' * 40
w_half = car_w / 2 + sprt_w / 2
#if (car_x + car_w / 2) < sprt_x < (car_x + car_w / 2):
if (car_x - w_half) < sprt_at < (car_x + w_half):
self.score += scr
def check_tm(self):
if self.position > self.seg_len * 2:
if self.tm_start == 0.0:
self.tm_start = time.time()
self.tm_end = self.tm_start
else:
self.tm_end = time.time()
self.tm_last_once = self.tm_end - self.tm_start
else:
self.tm_start = 0.0
#self.tm_end = 0.0
def update_bg(self):
# always move the cloud
for sky in self.bg_sky:
sky.rect.left -= 1#self.sky_speed
if sky.rect.left + sky.rect.width < 0:
sky.rect.left += sky.rect.width * 2
if sky.rect.left - sky.rect.width > 0:
sky.rect.left -= sky.rect.width * 2
if self.speed <= 0.0:
return
p_curve = self.player_seg.get('curve', 0.0)
#p_curve = 3
#print 'p_curve', p_curve
p_dt = self.speed * p_curve * self.centrifugal
#p_dt = 40
#p_dt = -40
#p_dt = random.randint(-100, 100)
#print p_dt
for sky in self.bg_sky:
#print sky
sky.rect.left += int(self.sky_speed * p_dt)
# always move the cloud
#sky.rect.left -= self.sky_speed
if sky.rect.left + sky.rect.width < 0:
sky.rect.left += sky.rect.width * 2
if sky.rect.left - sky.rect.width > 0:
sky.rect.left -= sky.rect.width * 2
for hill in self.bg_hills:
hill.rect.left += int(self.hill_speed * p_dt)
if hill.rect.left + hill.rect.width < 0:
hill.rect.left += hill.rect.width * 2
if hill.rect.left - hill.rect.width > 0:
hill.rect.left -= hill.rect.width * 2
for trees in self.bg_trees:
trees.rect.left += int(self.tree_speed * p_dt)
if trees.rect.left + trees.rect.width < 0:
trees.rect.left += trees.rect.width * 2
if trees.rect.left - trees.rect.width > 0:
trees.rect.left -= trees.rect.width * 2
class FPSptRoadMap(sptdraw.SptDrawBase):
def __init__(self, size, segs, rad, *args, **kwargs):
super(FPSptRoadMap, self).__init__(size)
self.segs = segs
self.rad = rad
#self.fill(consts.WHITE)
self.draw_segs(self.segs, self.rad)
def xy_to_cntr(self, x, y):
return [self.size[0] / 2 + x, self.size[1] / 2 - y]
def cv_to_engl(self, curve, rad):
a = float(curve) / rad
#a *= 10.0
#print a
s = 1.0
if a < 0.0:
s = -1.0
if a < -1.0:
a = -1.0
elif a > 1.0:
a = 1.0
#tht_d = math.acos(a)
tht_d = math.asin(a)
return tht_d
def get_segs_pnts(self, segs, rad):
pnts = []
x, y = 0.0, 0.0
tht = 0.0
rad_m = 4.0#2.0#1.0#
cv_s = 0
cv_l = 0.0
pnts.append([x, y])
for seg in segs:
curve = seg.get('curve', 0.0)
if curve == 0.0:
if cv_s:
tht_d = self.cv_to_engl(cv_l, rad)
#tht += tht_d
tht -= tht_d
rad_m = 20.0#10.0#50.0#
cv_s = 0
cv_l = 0.0
else:
rad_m = 0.5#1.0#0.1#
else:
if cv_s:
cv_l += curve
else:
cv_s = 1
continue
x += rad_m * math.cos(tht)
y += rad_m * math.sin(tht)
pnts.append([x, y])
#print pnts
return pnts
def get_segs_pnts_1(self, segs, rad):
pnts = []
x, y = 0.0, 0.0
tht = 0.0
rad_m = 4.0#2.0#1.0#
pnts.append([x, y])
for seg in segs:
curve = seg.get('curve', 0.0)
if curve == 0.0:
rad_m = 1.0#0.1#
else:
a = float(curve) / rad
a *= 10.0
#print a
if a < -1.0:
a = -1.0
elif a > 1.0:
a = 1.0
#tht_d = math.acos(a)
tht_d = math.asin(a) # TODO:
tht += tht_d
rad_m = 10.0#50.0#
x += rad_m * math.cos(tht)
y += rad_m * math.sin(tht)
pnts.append([x, y])
#print pnts
return pnts
def draw_segs(self, segs, rad):
pnts = self.get_segs_pnts(segs, rad)
#print pnts
if len(pnts) <= 1:
return
#if len(pnts) > 0:
# pnts.append(pnts[0])
cpnts = [self.xy_to_cntr(p[0], p[1]) for p in pnts]
c = utils.clr_from_str(FP_COLOR_BLUE)
#self.pygm.draw.polygon(self.surf, c, cpnts)
self.pygm.draw.lines(self.surf, c, False, cpnts, 3)
class FPSptProgress(sptdraw.SptDrawBase):
def __init__(self, size, c_bg=consts.BLUE, c_prog=consts.GREEN):
super(FPSptProgress, self).__init__(size)
self.c_bg = c_bg
self.c_prog = c_prog
self.progress(0.0)
def progress(self, prog):
y = self.size[1] * prog
self.fill(self.c_bg)
#self.pygm.draw.rect(self.surf, consts.GREEN,
# [1, 0, self.size[0] - 2, y])
# from down to up
self.pygm.draw.rect(self.surf, self.c_prog,
[1, self.size[1] - y,
self.size[0] - 2, y])
class FPStraight(pygm.PyGMSprite):
def __init__(self, cfg, *args, **kwargs):
super(FPStraight, self).__init__()
self.cfg = cfg
self.bg_sky1 = FPSptBg('img_flatpath/images/background.png',
IMG_POS_BACKGROUND['SKY'])
self.bg_sky1.rect.top = 0
self.bg_sky1.rect.left = 0
self.disp_add(self.bg_sky1)
self.bg_sky2 = FPSptBg('img_flatpath/images/background.png',
IMG_POS_BACKGROUND['SKY'])
self.bg_sky2.rect.top = 0
self.bg_sky2.rect.left = self.bg_sky1.rect.width
self.disp_add(self.bg_sky2)
self.bg_hills1 = FPSptBg('img_flatpath/images/background.png',
IMG_POS_BACKGROUND['HILLS'])
self.bg_hills1.rect.top = 0
self.bg_hills1.rect.left = 0
self.disp_add(self.bg_hills1)
self.bg_hills2 = FPSptBg('img_flatpath/images/background.png',
IMG_POS_BACKGROUND['HILLS'])
self.bg_hills2.rect.top = 0
self.bg_hills2.rect.left = self.bg_hills1.rect.width
self.disp_add(self.bg_hills2)
self.bg_trees1 = FPSptBg('img_flatpath/images/background.png',
IMG_POS_BACKGROUND['TREES'])
self.bg_trees1.rect.top = 0
self.bg_trees1.rect.left = 0
self.disp_add(self.bg_trees1)
self.bg_trees2 = FPSptBg('img_flatpath/images/background.png',
IMG_POS_BACKGROUND['TREES'])
self.bg_trees2.rect.top = 0
self.bg_trees2.rect.left = self.bg_trees1.rect.width
self.disp_add(self.bg_trees2)
self.car = FPSptSprts('img_flatpath/images/sprites.png',
IMG_POS_SPRITES['PLAYER_STRAIGHT'])
#print self.road.cameraDepth/self.road.playerZ
#self.car.scale(self.road.cameraDepth/self.road.playerZ)
self.car.scale(2)
self.car.rect.top = 400
self.car.rect.left = (640 - self.car.rect.width) / 2
##self.disp_add(self.car) # car disp add after road
#self.road = FPSptRoad((640, 240), self.cfg)
self.road = FPSptRoadB((640, 240), self.cfg,
car=self.car,
bg_sky=[self.bg_sky1, self.bg_sky2],
bg_hills=[self.bg_hills1, self.bg_hills2],
bg_trees=[self.bg_trees1, self.bg_trees2])
self.road.rect.top = 240
self.road.rect.left = 0
self.disp_add(self.road)
self.disp_add(self.car)
self.rdmap = FPSptRoadMap((480, 480),
self.road.rd_get_segs(whole=True),
self.road.seg_len)
self.rdmap.rect.top = 0
self.rdmap.rect.left = 80
self.rdmap.rotate(90)
self.disp_add(self.rdmap)
self.rdpsd = pygm.SptLbl(str(int(self.road.speed)),
c=consts.GREEN, font_size=12)
self.rdpsd.rect.top = 456
self.rdpsd.rect.left = 312
self.disp_add(self.rdpsd)
self.scr = pygm.SptLbl(str(int(self.road.score)),
c=consts.RED, font_size=16)
self.scr.rect.top = 40#454
self.scr.rect.left = 600
self.disp_add(self.scr)
self.tm_once = pygm.SptLbl(str(int(self.road.tm_last_once)),
c=consts.YELLOW, font_size=16)
self.tm_once.rect.top = 20#454
self.tm_once.rect.left = 600
self.disp_add(self.tm_once)
self.prog = FPSptProgress((4, 100), c_prog=consts.YELLOW)
self.prog.rect.top = 70#340
self.prog.rect.left = 610
#self.prog.rotate(180)
self.disp_add(self.prog)
self.spd = FPSptProgress((4, 100), c_prog=consts.GREEN)
self.spd.rect.top = 70#340
self.spd.rect.left = 602
#self.spd.rotate(180)
self.disp_add(self.spd)
def rdmap_hide(self):
self.rdmap.hide()
def rdmap_reset(self):
self.rdmap.clear()
self.rdmap.draw_segs(self.road.rd_get_segs(whole=True),
self.road.seg_len)
self.rdmap.rotate(90)
def road_reset(self):
self.road.rd_reset()
self.rdmap_reset()
def road_reset_keep_segs(self):
self.road.rd_reset(init=False, keep_segs=True)
def road_reset_from_file(self, segs_file='sr_roads/sr_road.txt'):
segs_file = utils.dir_abs(segs_file, __file__)
self.road.rd_reset(init=False, keep_segs=False,
segs_file=segs_file)
self.rdmap_reset()
def road_segs_to_file(self, segs_file=None):
if not segs_file:
segs_file = 'sr_roads/sr_road_' + str(int(time.time())) + '.txt'
segs_file = utils.dir_abs(segs_file, __file__)
self.road.rd_seg_json_save(segs_file)
def handle_event(self, events, *args, **kwargs):
#return events
r_events = []
for event in events:
#print event
if event.type == self.pglc.KEYUP:
k = event.key
if k == self.pglc.K_SPACE:
# hide / show road map
self.rdmap_hide()
elif k == self.pglc.K_RETURN:
self.road_reset()
elif k == self.pglc.K_TAB:
self.road_reset_keep_segs()
elif k == self.pglc.K_BACKSPACE:
self.road_reset_from_file()
elif k == self.pglc.K_SLASH:
self.road_segs_to_file()
else:
r_events.append(event)
elif event.type == self.pglc.KEYDOWN:
r_events.append(event)
else:
r_events.append(event)
return r_events
def refresh(self, fps_clock, *args, **kwargs):
self.rdpsd.lbl_set(str(int(self.road.speed)))
self.scr.lbl_set(str(int(self.road.score)))
self.tm_once.lbl_set(str(int(self.road.tm_last_once)))
prg = self.road.position / self.road.track_len
self.prog.progress(prg)
spdc = self.road.speed / self.road.speed_max
self.spd.progress(spdc)
class FPSceneA(pygm.PyGMScene):
def __init__(self, *args, **kwargs):
super(FPSceneA, self).__init__(*args, **kwargs)
self.straight = FPStraight({})
self.straight.rect.top = 0
self.straight.rect.left = 0
self.disp_add(self.straight)
        '''
self.sn1 = SptTmpx((200, 200))
self.sn1.rect.top = 100
self.sn1.rect.left = 100
self.disp_add(self.sn1)
'''
'''
self.lb1 = pygm.SptLbl('hello,', c=consts.GREEN, font_size=32)
self.lb1.rect.top = 200
self.lb1.rect.left = 100
self.disp_add(self.lb1)
'''
def handle_event(self, events, *args, **kwargs):
return events
def refresh(self, fps_clock, *args, **kwargs):
pass
class GMFlatpath(pygm.PyGMGame):
def __init__(self, title, winw, winh, *args, **kwargs):
super(GMFlatpath, self).__init__(title, winw, winh)
bk_im = utils.dir_abs('starfish/data/img_bk_1.jpg', __file__)
#self.bk = pygm.SptImg('data/img_bk_1.jpg')
self.bk = pygm.SptImg(bk_im)
self.bk.rect.top = -230
self.bk.rect.left = -230
#self.disp_add(self.bk)
self.scn1 = FPSceneA()
self.disp_add(self.scn1)
road_file = kwargs.get('road_file')
if road_file:
self.scn1.straight.road_reset_from_file(segs_file=road_file)
def main():
#sf = GMFlatpath('flatpath <:::>', 640, 480)
sf = GMFlatpath('flatpath <:::>', 640, 480, road_file='sr_road.txt')
sf.mainloop()
if __name__ == '__main__':
main()
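# Key bindings handled in FPStraight.handle_event, summarized from the code
# above: SPACE hides/shows the road map, RETURN resets the road, TAB resets
# the road but keeps its segments, BACKSPACE reloads sr_roads/sr_road.txt,
# and SLASH dumps the current segments to a timestamped file under sr_roads/.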
| 28.824576
| 100
| 0.467263
| 9,147
| 68,026
| 3.285886
| 0.075107
| 0.032606
| 0.023789
| 0.014639
| 0.617647
| 0.553234
| 0.499667
| 0.445269
| 0.421613
| 0.401051
| 0
| 0.064184
| 0.397186
| 68,026
| 2,359
| 101
| 28.836795
| 0.668764
| 0.099668
| 0
| 0.460497
| 0
| 0
| 0.046124
| 0.008643
| 0
| 0
| 0
| 0.00212
| 0
| 0
| null | null | 0.003762
| 0.005267
| null | null | 0.007524
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c154bd7941e6664ea91468d29e01f725ad32c14
| 2,914
|
py
|
Python
|
app/auth/views.py
|
ifaraag/app
|
d952f0dc58fd703074c19ed3235c1520119baf5f
|
[
"MIT"
] | null | null | null |
app/auth/views.py
|
ifaraag/app
|
d952f0dc58fd703074c19ed3235c1520119baf5f
|
[
"MIT"
] | null | null | null |
app/auth/views.py
|
ifaraag/app
|
d952f0dc58fd703074c19ed3235c1520119baf5f
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, render_template, redirect, url_for, request, flash
from flask_login import login_required, login_user, logout_user
from werkzeug.security import check_password_hash, generate_password_hash
from app import db, login_manager, pubnub, app, _callback
from .models import User
from .forms import LoginForm, SignupForm
mod_auth = Blueprint('auth', __name__)
@mod_auth.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm(request.form)
error = None
print(request.method)
if request.method == 'POST':
user = db.users.find_one({'username': request.form['username']})
if not user:
error = 'User does not exist'
elif not check_password_hash(user['password'], request.form['password']):
error = 'Invalid credentials. Please try again.'
else:
user_obj = User(user['username'])
login_user(user_obj)
return redirect(url_for('devices.list_devices'))
return render_template('auth/login.html',
title='Log In to Hydrosmart',
form=form,
error=error)
@mod_auth.route('/signup', methods=['GET', 'POST'])
def signup():
form = SignupForm(request.form)
error = None
if request.method == 'POST':
existing_user = db.users.find_one({'username' :
request.form['username']})
if existing_user:
error = 'Username already exists'
else:
new_user = {'username' : request.form['username'],
'email' : request.form['email'],
'zip' : request.form['zip'],
'password' : generate_password_hash(request.form['password'])}
db.users.insert_one(new_user)
user = db.users.find_one({'username': request.form['username']})
pubnub.channel_group_add_channel(channel_group=app.config['PUBNUB_CHANNEL_GRP'], channel=user['username'])
pubnub.grant(channel=user['username'], auth_key=app.config['PUBNUB_AUTH_KEY'], read=True, write=True, manage=True, ttl=0)
return redirect(url_for('dashboard.dashboard'))
return render_template('auth/signup.html', form=form,
title='Sign Up for Hydrosmart', error=error)
# @mod_auth.route('/googlelogin', methods=['GET', 'POST'])
@mod_auth.route("/logout")
@login_required
def logout():
logout_user()
flash("Logged out.")
return redirect('/login')
@login_manager.unauthorized_handler
def unauthorized_callback():
return redirect('/login')
@login_manager.user_loader
def load_user(username):
u = db.users.find_one({'username': username})
if not u:
return None
return User(u['username'])
def callback(message, channel):
db.data.insert_one(message)
def error(message):
db.data.insert_one(message)
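# Hedged wiring sketch (not part of this module): the blueprint above only
# serves requests once it is registered on the Flask app; the factory location
# below is an assumption about this project's layout.
#
#     from app.auth.views import mod_auth
#     app.register_blueprint(mod_auth)  # keeps routes at /login, /signup, /logout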
| 37.358974
| 133
| 0.630062
| 344
| 2,914
| 5.162791
| 0.296512
| 0.061937
| 0.027027
| 0.031532
| 0.175113
| 0.078266
| 0.078266
| 0.078266
| 0.078266
| 0.052928
| 0
| 0.000451
| 0.239876
| 2,914
| 77
| 134
| 37.844156
| 0.801354
| 0.019218
| 0
| 0.181818
| 1
| 0
| 0.156162
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.106061
| false
| 0.045455
| 0.090909
| 0.015152
| 0.318182
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c18032075b4197ee9055f4f541529df445b2854
| 998
|
py
|
Python
|
tests/cli/conftest.py
|
Aahbree/reference-data-repository
|
f318c0532aaf941ec4f00c8375c9dea45c56f186
|
[
"MIT"
] | null | null | null |
tests/cli/conftest.py
|
Aahbree/reference-data-repository
|
f318c0532aaf941ec4f00c8375c9dea45c56f186
|
[
"MIT"
] | 5
|
2021-01-27T22:17:19.000Z
|
2021-12-14T17:13:58.000Z
|
tests/cli/conftest.py
|
Aahbree/reference-data-repository
|
f318c0532aaf941ec4f00c8375c9dea45c56f186
|
[
"MIT"
] | 5
|
2021-12-08T02:33:44.000Z
|
2021-12-13T03:21:51.000Z
|
# This file is part of the Reference Data Repository (refdata).
#
# Copyright (C) 2021 New York University.
#
# refdata is free software; you can redistribute it and/or modify it under the
# terms of the MIT License; see LICENSE file for more details.
"""Fixtures for testing the command-line interface."""
import os
import pytest
from click.testing import CliRunner
from refdata.db import DB
import refdata.config as config
@pytest.fixture
def refdata_cli(tmpdir):
"""Initialize the environment and the database for the local store."""
basedir = os.path.abspath(str(tmpdir))
connect_url = 'sqlite:///{}'.format(os.path.join(basedir, 'test.db'))
DB(connect_url=connect_url).init()
os.environ[config.ENV_BASEDIR] = basedir
os.environ[config.ENV_URL] = connect_url
# Make sure to reset the database.
yield CliRunner()
# Clear environment variables that were set for the test runner.
del os.environ[config.ENV_BASEDIR]
del os.environ[config.ENV_URL]
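# Hedged companion example (not part of the fixture file above): a test
# receives the prepared CliRunner and invokes the refdata CLI against the
# temporary store. The `refdata.cli.base` import path is an assumption about
# the package layout; adjust it to the real entry point.
#
#     from refdata.cli.base import cli
#
#     def test_cli_runs(refdata_cli):
#         result = refdata_cli.invoke(cli, ['--help'])
#         assert result.exit_code == 0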
| 30.242424
| 78
| 0.733467
| 148
| 998
| 4.885135
| 0.547297
| 0.055325
| 0.082988
| 0.099585
| 0.135546
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004831
| 0.170341
| 998
| 32
| 79
| 31.1875
| 0.868357
| 0.450902
| 0
| 0
| 0
| 0
| 0.035849
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.333333
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
7c1a4912119b5eeaa02dc5d6942de0df8f969733
| 1,783
|
py
|
Python
|
python/jittor/utils/publish.py
|
Jittor/Jittor
|
bc945bae94bded917214b0afe12be6bf5b919dbe
|
[
"Apache-2.0"
] | 4
|
2020-01-12T13:16:16.000Z
|
2020-01-12T15:43:54.000Z
|
python/jittor/utils/publish.py
|
Jittor/Jittor
|
bc945bae94bded917214b0afe12be6bf5b919dbe
|
[
"Apache-2.0"
] | null | null | null |
python/jittor/utils/publish.py
|
Jittor/Jittor
|
bc945bae94bded917214b0afe12be6bf5b919dbe
|
[
"Apache-2.0"
] | 1
|
2020-01-12T13:17:17.000Z
|
2020-01-12T13:17:17.000Z
|
#!/usr/bin/python3
# ***************************************************************
# Copyright (c) 2022 Jittor. All Rights Reserved.
# Maintainers:
# Dun Liang <[email protected]>.
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
# Publish steps:
# 1. build,push,upload docker image[jittor/jittor]
# 2. build,push,upload docker image[jittor/jittor-cuda]
# upload to pip:
# rm -rf dist && python3.7 ./setup.py sdist && python3.7 -m twine upload dist/*
import os
def run_cmd(cmd):
print("[run cmd]", cmd)
assert os.system(cmd) == 0
def upload_file(path):
run_cmd(f"rsync -avPu {path} jittor-web:Documents/jittor-blog/assets/build/")
def docker_task(name, build_cmd):
run_cmd(build_cmd)
run_cmd(f"sudo docker push {name}")
bname = os.path.basename(name)
run_cmd(f"sudo docker save {name}:latest -o /tmp/{bname}.tgz && sudo chmod 666 /tmp/{bname}.tgz")
upload_file(f"/tmp/{bname}.tgz")
docker_task(
"jittor/jittor-cuda-11-1",
"sudo docker build --tag jittor/jittor-cuda-11-1:latest -f script/Dockerfile_cuda11 . --network host"
)
docker_task(
"jittor/jittor",
"sudo docker build --tag jittor/jittor:latest . --network host"
)
docker_task(
"jittor/jittor-cuda",
"sudo docker build --tag jittor/jittor-cuda:latest --build-arg FROM_IMAGE='nvidia/cuda:10.2-cudnn7-devel-ubuntu18.04' . --network host"
)
docker_task(
"jittor/jittor-cuda-10-1",
"sudo docker build --tag jittor/jittor-cuda-10-1:latest --build-arg FROM_IMAGE='nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04' . --network host"
)
run_cmd("ssh jittor-web Documents/jittor-blog.git/hooks/post-update")
| 34.288462
| 144
| 0.647224
| 258
| 1,783
| 4.403101
| 0.403101
| 0.105634
| 0.098592
| 0.077465
| 0.498239
| 0.387324
| 0.285211
| 0.123239
| 0
| 0
| 0
| 0.029373
| 0.140774
| 1,783
| 52
| 145
| 34.288462
| 0.712141
| 0.326416
| 0
| 0.137931
| 0
| 0.137931
| 0.643098
| 0.296296
| 0
| 0
| 0
| 0
| 0.034483
| 1
| 0.103448
| false
| 0
| 0.034483
| 0
| 0.137931
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c1d6fd7dc1976bcfc2727fbe10b4b7b22073b1a
| 705
|
py
|
Python
|
2017/third.py
|
vla3089/adventofcode
|
0aefb5509e9f816f89eeab703393be7222632e02
|
[
"Apache-2.0"
] | null | null | null |
2017/third.py
|
vla3089/adventofcode
|
0aefb5509e9f816f89eeab703393be7222632e02
|
[
"Apache-2.0"
] | null | null | null |
2017/third.py
|
vla3089/adventofcode
|
0aefb5509e9f816f89eeab703393be7222632e02
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
input = 368078
size = 1
s_size = size * size # squared size
while (s_size < input):
size += 2
s_size = size * size
bottom_right = s_size
bottom_left = s_size - size + 1
top_left = s_size - 2 * size + 2
top_right = s_size - 3 * size + 3
input_x = -1
input_y = -1
# bottom horizontal line
if (input > bottom_left):
input_x = size - 1
input_y = input - bottom_left
elif (input > top_left):
input_y = input - top_left
input_x = 0
elif (input > top_right):
input_x = 0
input_y = size - input + top_right - 1
else:
input_x = top_right - input
input_y = size - 1
ap_x = size // 2
ap_y = ap_x
print(abs(ap_x - input_x) + abs(ap_y - input_y))
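# Sanity checks from the 2017 Day 3 puzzle statement (not part of the original
# script): square 1 is 0 steps from the access port, square 12 is 3 steps,
# square 23 is 2 steps, and square 1024 is 31 steps.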
| 19.054054
| 47
| 0.631206
| 125
| 705
| 3.28
| 0.224
| 0.085366
| 0.065854
| 0.063415
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040462
| 0.26383
| 705
| 36
| 48
| 19.583333
| 0.749518
| 0.079433
| 0
| 0.148148
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.037037
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c1dfdf1304b0b11fe75fef3682da8277a3d5207
| 2,981
|
py
|
Python
|
racer/methods/genetic_programming/parameterized.py
|
max-eth/racer
|
952991aedec5d8229bb1126c9c066613f5c30146
|
[
"MIT"
] | 1
|
2022-02-26T00:10:03.000Z
|
2022-02-26T00:10:03.000Z
|
racer/methods/genetic_programming/parameterized.py
|
max-eth/racer
|
952991aedec5d8229bb1126c9c066613f5c30146
|
[
"MIT"
] | null | null | null |
racer/methods/genetic_programming/parameterized.py
|
max-eth/racer
|
952991aedec5d8229bb1126c9c066613f5c30146
|
[
"MIT"
] | null | null | null |
import copy
import numpy as np
from racer.utils import load_pickle
from racer.methods.genetic_programming.program_tree import ProgramTree
class ParameterizedTree(ProgramTree):
# This makes the assumption that all children of the underlying tree are in a field .children and that the underlying tree has the field .name
def __init__(self, underlying_tree, init_fct=None, _copy=True):
if _copy:
underlying_tree = copy.deepcopy(underlying_tree) # safety first
if hasattr(underlying_tree, "children"):
underlying_tree.children = [
ParameterizedTree(underlying_tree=child, _copy=False)
for child in underlying_tree.children
]
self.underlying_tree = underlying_tree
if init_fct is None:
self.set_params([1, 0])
else:
self.set_params(init_fct())
def set_params(self, params):
self.weight, self.bias = params
self.name = self.underlying_tree.name + " * {} + {}".format(
self.weight, self.bias
)
def get_params(self):
return [self.weight, self.bias]
def __call__(self, *x):
return self.underlying_tree(*x) * self.weight + self.bias
def __len__(self):
return len(self.underlying_tree)
def display(self, prefix):
res = prefix + self.name + "\n"
if hasattr(self.underlying_tree, "children"):
for child in self.underlying_tree.children:
res += child.display(prefix=" " + prefix)
return res
def _set_dirty(self):
raise Exception("Parameterized trees should not be mutated")
def in_order(self):
yield self
if hasattr(self.underlying_tree, "children"):
for child in self.underlying_tree.children:
for node in child.in_order():
yield node
class ParameterizedIndividual:
def __init__(self, parameterized_trees):
self.parameterized_trees = parameterized_trees
@staticmethod
def from_individual(ind):
return ParameterizedIndividual(
parameterized_trees=[ParameterizedTree(tree) for tree in ind.trees]
)
@staticmethod
def from_pickled_individual(fname):
return ParameterizedIndividual.from_individual(load_pickle(fname))
def __call__(self, *x):
return [tree(*x) for tree in self.parameterized_trees]
def __len__(self):
return sum(len(tree) for tree in self.parameterized_trees)
def set_flat_parameters(self, params):
n_used = 0
for tree in self.parameterized_trees:
for node in tree.in_order():
node.set_params(list(params[n_used : n_used + 2]))
n_used += 2
def get_flat_parameters(self):
params = []
for tree in self.parameterized_trees:
for node in tree.in_order():
params += node.get_params()
return np.array(params)
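# Hedged usage sketch (not part of the original module): LeafTree below is a
# hypothetical stand-in for a real ProgramTree leaf, just to show the
# [weight, bias] parameterization round-trip.
if __name__ == "__main__":

    class LeafTree:
        name = "x0"

        def __call__(self, *x):
            return x[0]

        def __len__(self):
            return 1

    ptree = ParameterizedTree(LeafTree())
    assert ptree(3.0) == 3.0  # defaults: weight=1, bias=0
    ptree.set_params([2.0, 0.5])
    assert ptree(3.0) == 3.0 * 2.0 + 0.5

    ind = ParameterizedIndividual([ParameterizedTree(LeafTree())])
    ind.set_flat_parameters(np.array([2.0, 0.5]))  # one (weight, bias) pair per node
    assert ind(3.0) == [6.5]
    assert list(ind.get_flat_parameters()) == [2.0, 0.5]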
| 32.402174
| 146
| 0.637035
| 356
| 2,981
| 5.103933
| 0.255618
| 0.13869
| 0.089158
| 0.039626
| 0.226197
| 0.171712
| 0.171712
| 0.134287
| 0.134287
| 0.134287
| 0
| 0.002325
| 0.27843
| 2,981
| 91
| 147
| 32.758242
| 0.842399
| 0.051325
| 0
| 0.2
| 0
| 0
| 0.027965
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.214286
| false
| 0
| 0.057143
| 0.1
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c28fc0563fc8f73fd257c1d3e24a953c2e9ec7c
| 1,780
|
py
|
Python
|
src/compas/datastructures/mesh/bbox.py
|
arpastrana/compas
|
ed677a162c14dbe562c82d72f370279259faf7da
|
[
"MIT"
] | 2
|
2021-03-17T18:14:22.000Z
|
2021-09-19T13:50:02.000Z
|
src/compas/datastructures/mesh/bbox.py
|
arpastrana/compas
|
ed677a162c14dbe562c82d72f370279259faf7da
|
[
"MIT"
] | 9
|
2019-09-11T08:53:19.000Z
|
2019-09-16T08:35:39.000Z
|
src/compas/datastructures/mesh/bbox.py
|
Licini/compas
|
34f65adb3d0abc3f403312ffba62aa76f3376292
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compas.geometry import bounding_box
from compas.geometry import bounding_box_xy
__all__ = [
'mesh_bounding_box',
'mesh_bounding_box_xy',
]
def mesh_bounding_box(mesh):
"""Compute the (axis aligned) bounding box of a mesh.
Parameters
----------
mesh : compas.datastructures.Mesh
The mesh data structure.
Returns
-------
list of point
The 8 corners of the bounding box of the mesh.
Examples
--------
>>> mesh_bounding_box(mesh)
[[0.0, 0.0, 0.0], [10.0, 0.0, 0.0], [10.0, 10.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 0.0], [10.0, 0.0, 0.0], [10.0, 10.0, 0.0], [0.0, 10.0, 0.0]]
"""
xyz = mesh.vertices_attributes('xyz', keys=list(mesh.vertices()))
return bounding_box(xyz)
def mesh_bounding_box_xy(mesh):
"""Compute the (axis aligned) bounding box of a projection of the mesh in the XY plane.
Parameters
----------
mesh : compas.datastructures.Mesh
The mesh data structure.
Returns
-------
list of point
The 4 corners of the bounding polygon in the XY plane.
Examples
--------
>>> mesh_bounding_box_xy(mesh)
[[0.0, 0.0, 0.0], [10.0, 0.0, 0.0], [10.0, 10.0, 0.0], [0.0, 10.0, 0.0]]
"""
xyz = mesh.vertices_attributes('xyz')
return bounding_box_xy(xyz)
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
import doctest
import compas
from compas.datastructures import Mesh
mesh = Mesh.from_obj(compas.get('faces.obj'))
doctest.testmod()
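# Note on the __main__ block above (summary, not new behavior): the doctests
# reference a module-level `mesh`, so Mesh.from_obj(...) must run before
# doctest.testmod() for the examples to resolve.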
| 23.733333
| 148
| 0.561236
| 247
| 1,780
| 3.842105
| 0.206478
| 0.096944
| 0.110643
| 0.101159
| 0.514226
| 0.478398
| 0.404636
| 0.404636
| 0.404636
| 0.322445
| 0
| 0.061297
| 0.211798
| 1,780
| 74
| 149
| 24.054054
| 0.61511
| 0.538202
| 0
| 0
| 0
| 0
| 0.085592
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.380952
| 0
| 0.571429
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
7c2c549754955b919f978ac6624f7aa2371b569a
| 19,500
|
py
|
Python
|
PS12/api2.py
|
AbhinavSingh-21f1002369/AFKZenCoders
|
344475e7d5d60c09637b0bec28c5dab1befe2b65
|
[
"MIT"
] | null | null | null |
PS12/api2.py
|
AbhinavSingh-21f1002369/AFKZenCoders
|
344475e7d5d60c09637b0bec28c5dab1befe2b65
|
[
"MIT"
] | null | null | null |
PS12/api2.py
|
AbhinavSingh-21f1002369/AFKZenCoders
|
344475e7d5d60c09637b0bec28c5dab1befe2b65
|
[
"MIT"
] | 2
|
2021-10-11T09:28:00.000Z
|
2021-10-14T10:30:11.000Z
|
from flask import Flask, render_template, request, jsonify,send_file, redirect,session, url_for
from werkzeug.utils import secure_filename
import os
import utilities, queries
import logger
from flask_cors import CORS, cross_origin
from datetime import timedelta
app = Flask(__name__)
CORS(app)
cors = CORS(app, resources={r"/*": {"origins": "*"}})
UPLOAD_FOLDER = '/home/pi/Desktop/AFKZenCoders/PS12/uploads/'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['CORS_HEADERS'] = 'Content-Type'
app.secret_key = "AFKZenCodersAAS"
app.permanent_session_lifetime = timedelta(minutes=60)
@app.route('/')
def hello():
if "username" in session:
logger.logit("Rendered upload.html - test wali")
return render_template('upload.html')
else:
logger.logit("Session does not exist")
logger.logit("Rendered root '/'")
return render_template('index.html')
@app.route('/restart')
def restart():
logger.logit(f"---GOOGLE RESTART---")
os.system("sudo reboot -h now")
@app.route('/userauth', methods = ['POST','GET'])
def userauth():
username = request.form.get('username')
password = request.form.get('password')
if username=="root" and password=="toor":
logger.logit(f"Success LOGIN Request Username:{username} Password:{password}")
session["username"] = username
session.permanent = True
return redirect(url_for("page_upload"))
else:
logger.logit(f"Failure LOGIN Request Username:{username} Password:{password}")
return redirect("http://www.themedallionschool.com/abhinav/PS12/incorrect.html", code=302)
@app.route('/page_upload')
def page_upload():
if "username" in session:
logger.logit("Rendered upload.html")
return render_template('upload.html')
else:
logger.logit("Session does not exist")
return redirect("/")
@app.route('/page_cdr')
def page_cdr():
if "username" in session:
logger.logit("Rendered cdr.html")
return render_template('cdr.html')
else:
logger.logit("Session does not exist")
return redirect("/")
@app.route('/page_fir')
def page_fir():
if "username" in session:
logger.logit("Rendered fir.html")
return render_template('fir.html')
else:
logger.logit("Session does not exist")
return redirect("/")
@app.route('/logout')
def logout():
if "username" in session:
session.pop("username", None)
logger.logit("Successfull logout")
return redirect("/")
else:
logger.logit("Session does not exist")
return redirect("/")
@app.route('/upload')
def upload_file():
logger.logit("Rendered upload.html - test wali")
return render_template('upload.html')
@app.route('/uploader',methods=['GET','POST'])
def uploader():
uploaded_files = request.files.getlist("file")
#number = request.args.get('number')
#number = "7982345234"
#print(uploaded_files)
logger.logit(f"/ยฐ Multiple Files Upload Start")
    for file in uploaded_files:
        filename = secure_filename(file.filename)
        path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        if filename in ("917982345234.csv", "918367448476.csv", "916100080762.csv"):
            file.save(path)
            number = filename[2:11]
            logger.logit(f"| CDRData Saved {number}")
            utilities.addCDRData(path, number)
        elif filename == "CGI_Dataset.csv":
            file.save(path)
            logger.logit("| CGIData Saved")
            utilities.addCGIData(path)
        elif filename == "Bank_Details.csv":
            file.save(path)
            logger.logit("| Bank_Details Saved")
            utilities.addBankData(path)
        elif filename == "FIR_Dataset.csv":
            file.save(path)
            logger.logit("| FIR_Dataset Saved")
            utilities.addFIRData(path)
        elif filename == "Thana.csv":
            file.save(path)
            logger.logit("| Thana Saved")
            utilities.addThanaData(path)
        elif filename == "Thana_list_UP.csv":
            file.save(path)
            logger.logit("| Thana_list_UP Saved")
            utilities.addthanaListData(path)
        else:
            logger.logit(f"File Upload error - {filename}")
logger.logit(f"\. Multiple Files Uploaded - {len(uploaded_files)}")
return render_template('cdr.html')
@app.route('/uploader/cdr', methods = ['GET', 'POST'])
def upload_cdr_fxn():
if request.method == 'POST':
# Getting the File
file = request.files['file']
number = request.files['number']
filename = secure_filename(file.filename)
# Path for file
path_of_csv = os.path.join(app.config['UPLOAD_FOLDER'], filename)
# Saving File
file.save(path_of_csv)
logger.logit("CDRData Saved")
print("CDR File Saved successfully")
# Loading File To Database
utilities.addCDRData(path_of_csv,number)
return "CDR File Saved and Loaded to Database Successfully"
@app.route('/uploader/thana', methods = ['GET', 'POST'])
def upload_thana_fxn():
if request.method == 'POST':
# Getting the File
file = request.files['file']
filename = secure_filename(file.filename)
# Path for file
path_of_csv = os.path.join(app.config['UPLOAD_FOLDER'], filename)
# Saving File
file.save(path_of_csv)
logger.logit("ThanaData Saved")
print("Thana File Saved successfully")
# Loading File To Database
utilities.addThanaData(path_of_csv)
return "Thana File Saved and Loaded to Database Successfully"
@app.route('/uploader/bankacc', methods = ['GET', 'POST'])
def upload_bankacc_fxn():
if request.method == 'POST':
# Getting the File
file = request.files['file']
filename = secure_filename(file.filename)
# Path for file
path_of_csv = os.path.join(app.config['UPLOAD_FOLDER'], filename)
# Saving File
file.save(path_of_csv)
print("BankAcc File Saved successfully")
logger.logit("BankData Saved")
# Loading File To Database
utilities.addBankData(path_of_csv)
return "BankAcc File Saved and Loaded to Database Successfully"
@app.route('/uploader/cgi', methods = ['GET', 'POST'])
def upload_cgi_fxn():
if request.method == 'POST':
# Getting the File
file = request.files['file']
filename = secure_filename(file.filename)
# Path for file
path_of_csv = os.path.join(app.config['UPLOAD_FOLDER'], filename)
# Saving File
file.save(path_of_csv)
print("CGI File Saved successfully")
logger.logit("CGIData Saved")
# Loading File To Database
utilities.addCGIData(path_of_csv)
return "CGI File Saved and Loaded to Database Successfully"
@app.route('/uploader/fir', methods = ['GET', 'POST'])
def upload_fir_fxn():
if request.method == 'POST':
# Getting the File
file = request.files['file']
filename = secure_filename(file.filename)
# Path for file
path_of_csv = os.path.join(app.config['UPLOAD_FOLDER'], filename)
# Saving File
file.save(path_of_csv)
print("FIR File Saved successfully")
logger.logit("FIRData Saved")
# Loading File To Database
utilities.addFIRData(path_of_csv)
return "FIR File Saved and Loaded to Database Successfully"
@app.route('/uploader/thanalist', methods = ['GET', 'POST'])
def upload_thanalist_fxn():
if request.method == 'POST':
# Getting the File
file = request.files['file']
filename = secure_filename(file.filename)
# Path for file
path_of_csv = os.path.join(app.config['UPLOAD_FOLDER'], filename)
# Saving File
file.save(path_of_csv)
print("Thana List File Saved successfully")
logger.logit("ThanaListDATA Saved")
# Loading File To Database
utilities.addthanaListData(path_of_csv)
return "Thana File Saved and Loaded to Database Successfully"
# ############################### Queries ##################################
@app.route('/query/1/', methods = ['GET'])
def query_1():
headers = ["Calling Number","Called Number","Start Time","Duration(sec)","Call Type"]
query = "SELECT calling_number, called_number, start_time, duration, cell_type FROM CallData ORDER BY duration DESC"
result = queries.runQuery(query)
if len(result) != 0:
response = {'headers':headers,'rows':result}
else:
response = {'headers':["No Data Available"],'rows':[]}
logger.logit(">>> Query 1 Call")
return jsonify(response)
@app.route('/query/2/', methods = ['GET'])
def query_2():
# Parsing the Headers
since = str(request.args.get('since')) + " 00:00:00"
till = str(request.args.get('till')) + " 23:59:59"
headers = ["Calling Number","Called Number","Start Time","End Time","Duration(sec)","Start Tower","End Tower","Call Type","IMEI","IMSI","SMSC","Service Provider"]
query = f'SELECT * FROM CallData WHERE start_time < "{till}" AND start_time > "{since}";'
result = queries.runQuery(query)
if len(result) != 0:
response = {'headers':headers,'rows':result}
else:
response = {'headers':["No Data Available"],'rows':[]}
fString = f">>> Query 2 Call since:{since}, till:{till}"
logger.logit(fString)
return jsonify(response)
@app.route('/query/3/', methods = ['GET'])
def query_3():
headers = ["Calling Number","Called Number","Start Time","End Time","Duration(sec)","Start Tower","End Tower","Call Type","IMEI","IMSI","SMSC","Service Provider"]
query = f"SELECT * FROM CallData ORDER BY duration DESC LIMIT 10"
result = queries.runQuery(query)
if len(result) != 0:
response = {'headers':headers,'rows':result}
else:
response = {'headers':["No Data Available"],'rows':[]}
logger.logit(">>> Query 3 Call")
return jsonify(response)
@app.route('/query/4/', methods = ['GET'])
def query_4():
headers = ["Dialled Number","Total Dialled Calls","Total Duration"]
query = f'''SELECT called_number, count(*) as 'Frequency', sum(duration) as 'Total Duration' from CallData where cell_type="OUT" GROUP by called_number ORDER by Frequency DESC'''
result = queries.runQuery(query)
if len(result) != 0:
response = {'headers':headers,'rows':result}
else:
response = {'headers':["No Data Available"],'rows':[]}
logger.logit(">>> Query 4 Call")
return jsonify(response)
@app.route('/query/5/', methods = ['GET'])
def query_5():
headers = ["Caller","Total Recieved Calls","Total Duration"]
query = f'''SELECT calling_number, count(*) as 'Frequency', sum(duration) as 'Total Duration' from CallData where cell_type="IN" GROUP by calling_number ORDER by Frequency DESC'''
result = queries.runQuery(query)
if len(result) != 0:
response = {'headers':headers,'rows':result}
else:
response = {'headers':["No Data Available"],'rows':[]}
logger.logit(">>> Query 5 Call")
return jsonify(response)
@app.route('/query/6/', methods = ['GET'])
def query_6():
headers = ["Called Number","Total Duration(sec)"]
query = f"SELECT DISTINCT called_number, sum(duration) as totalDuration FROM CallData WHERE called_number NOT in (7982345234) GROUP BY called_number ORDER BY totalDuration DESC "
result = queries.runQuery(query)
if len(result) != 0:
response = {'headers':headers,'rows':result}
else:
response = {'headers':["No Data Available"],'rows':[]}
logger.logit(">>> Query 6 Call")
return jsonify(response)
@app.route('/query/7/', methods = ['GET'])
def query_7():
headers = ["Called Number","Duration","Call Type"]
query = f'SELECT called_number, duration, cell_type FROM CallData WHERE cell_type="OUT" ORDER by duration DESC'
result = queries.runQuery(query)
if len(result) != 0:
response = {'headers':headers,'rows':result}
else:
response = {'headers':["No Data Available"],'rows':[]}
logger.logit(">>> Query 7 Call")
return jsonify(response)
@app.route('/query/8/', methods = ['GET'])
def query_8():
headers = ["Calling Number","Duration","Call Type"]
query = f'SELECT calling_number, duration, cell_type FROM CallData WHERE cell_type="IN" ORDER by duration DESC'
result = queries.runQuery(query)
headers = ["Phone NO","Duration","Call Type"]
if len(result) != 0:
response = {'headers':headers,'rows':result}
else:
response = {'headers':["No Data Available"],'rows':[]}
logger.logit(">>> Query 8 Call")
return jsonify(response)
@app.route('/query/9/', methods = ['GET'])
def query_9():
headers = ["Calling Number","Called Number","Start Time","End Time","Duration(sec)","Start Tower","End Tower","Call Type","IMEI","IMSI","SMSC","Service Provider"]
# Parsing the Headers
date = request.args.get('date')
query = f'SELECT * from CallData where start_time like "{date}%" or end_time like "{date}%"'
result = queries.runQuery(query)
if len(result) != 0:
response = {'headers':headers,'rows':result}
else:
response = {'headers':["No Data Available"],'rows':[]}
fString = f">>> Query 10 Call date:{date}"
logger.logit(fString)
return jsonify(response)
@app.route('/query/10/', methods = ['GET'])
def query_10():
headers = ["Start Time","End Time","Tower 1","Tower 2"]
# Parsing the Headers
date = request.args.get('date')
    query = f'''SELECT start_time, end_time, cell1, cell2 from CallData where (start_time like "{date}%" or end_time like "{date}%")'''
result = queries.runQuery(query)
#print(result)
fString = f">>> Query 10 Call date:{date}"
if len(result) != 0:
response = {'headers':headers,'rows':result}
else:
response = {'headers':["No Data Available"],'rows':[]}
logger.logit(fString)
return jsonify(response)
@app.route('/query/11/', methods = ['GET'])
def query_11():
query = f'''SELECT DISTINCT called_number FROM CallData WHERE cell_type="OUT" UNION SELECT DISTINCT calling_number FROM CallData WHERE cell_type="IN"'''
result = queries.runQuery(query)
#print(result)
#res = []
#for item in result:
# res.append(item[0])
headers = ["Mobile Number"]
if len(result) != 0:
response = {'headers':headers,'rows':result}
else:
response = {'headers':["No Data Available"],'rows':[]}
logger.logit(">>> Query 11 Call")
return jsonify(response)
@app.route('/query/12/', methods = ['GET'])
def query_12():
# Parsing the Headers
number = request.args.get('number')
query = f'''SELECT * FROM CallData WHERE called_number="{number}" or calling_number="{number}"'''
result = queries.runQuery(query)
headers = ["Calling Number","Called Number","Start Time","End Time","Duration(sec)","Start Tower","End Tower","Call Type","IMEI","IMSI","SMSC","Service Provider"]
if len(result) != 0:
response = {'headers':headers,'rows':result}
else:
response = {'headers':["No Data Available"],'rows':[]}
fString = f">>> Query 12 Call number:{number}"
logger.logit(fString)
return jsonify(response)
@app.route('/query/20/', methods = ['GET'])
def query_20():
# Parsing the Headers
fir = request.args.get('fir')
query = f'SELECT * from FIR WHERE FIR_No={int(fir)}'
result = queries.runQuery(query)
#print(result)
headers = ["FIR No","District","PS ID","Time of FIR","Complainant","Act","Section","Complainant Mobile Number"]
if len(result) != 0:
response = {'headers':headers,'rows':result}
else:
response = {'headers':["No Data Available"],'rows':[]}
fString = f">>> Query 20 Call for:{fir}"
logger.logit(fString)
return jsonify(response)
@app.route('/query/100/', methods = ['GET'])
def query_100():
# Parsing the Headers
IMEI = request.args.get('imei')
    query = f'SELECT * from CallData WHERE imei="{IMEI}"'
    result = queries.runQuery(query)
    #print(result)
    headers = ["Calling Number","Called Number","Start Time","End Time","Duration(sec)","Start Tower","End Tower","Call Type","IMEI","IMSI","SMSC","Service Provider"]
if len(result) != 0:
response = {'headers':headers,'rows':result}
else:
response = {'headers':["No Data Available"],'rows':[]}
fString = f">>> Query 100 Call IMEI:{imei}"
logger.logit(fString)
return jsonify(response)
@app.route('/query/101/', methods = ['GET'])
def query_101():
#unique IMEIs
IMEI = []
unique_imeis_query = f'SELECT DISTINCT imei FROM CallData'
resultset = queries.runQuery(unique_imeis_query)
for results in resultset:
print(results)
#unique_imsi_query = f'SELECT * from CallData where imei={results}'
return ("OK", code=200)
#unique_imsi =
@app.route('/loadedfiles', methods = ['GET'])
def loadedfiles():
csv_files = []
for filename in os.listdir("/home/pi/Desktop/AFKZenCoders/PS12/uploads/"):
if filename.endswith(".csv"):
csv_files.append(filename)
logger.logit("Rendered uploaded files")
return jsonify({'CSV files':csv_files})
@app.route('/deleteloaded', methods = ['GET'])
def deleteloaded():
csv_files = []
for filename in os.listdir("/home/pi/Desktop/AFKZenCoders/PS12/uploads/"):
if filename.endswith(".csv"):
fstring = f"/home/pi/Desktop/AFKZenCoders/PS12/uploads/{filename}"
os.remove(fstring)
os.remove("/home/pi/Desktop/AFKZenCoders/PS12/CDRdata.db")
logger.logit("### Files Deleted ###")
return jsonify({'CSV files':csv_files})
# Download API
@app.route("/downloadfile/<filename>", methods = ['GET'])
def download_file(filename):
logger.logit("Rendered download.html")
return render_template('download.html',value=filename)
@app.route('/return-files/<filename>')
def return_files_tut(filename):
file_path = "/home/pi/Desktop/AFKZenCoders/PS12/CDRdata.db"
logger.logit("Database Downloaded")
return send_file(file_path, as_attachment=True, attachment_filename='')
@app.route('/logs')
def logs():
with open("/home/pi/Desktop/AFKZenCoders/PS12/Logs.txt","r") as f:
lines = f.readlines()
f.close()
formated_lines = []
for i in range(len(lines)-1,0,-1):
formated_lines.append(lines[i])
return jsonify({'logs':formated_lines})
@app.route('/graph')
def graph():
query = f'SELECT date,in_count,out_count,sms_count,total from "798234523"'
result = queries.runQuery(query)
#print(result)
headers = ["Date","Incomming Calls","OutGoing Calls","SMS","Total Interactions"]
if len(result) != 0:
response = {'headers':headers,'rows':result}
else:
response = {'headers':["No Data Available"],'rows':[]}
fString = f">>> GRAPH Call"
logger.logit(fString)
return jsonify(response)
if __name__ == "__main__":
app.run(host='0.0.0.0',port = 1313,debug = True)
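# Hedged usage notes (not part of the app): every /query/N/ endpoint responds
# with JSON of the form {"headers": [...], "rows": [...]}. With the app
# listening on port 1313 as configured above, a date-ranged CDR pull would be:
#
#     curl "http://localhost:1313/query/2/?since=2021-01-01&till=2021-01-31"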
| 38.16047
| 182
| 0.655385
| 2,475
| 19,500
| 5.080808
| 0.117172
| 0.044612
| 0.012883
| 0.02505
| 0.673797
| 0.629026
| 0.557694
| 0.508072
| 0.493201
| 0.448668
| 0
| 0.014299
| 0.182308
| 19,500
| 511
| 183
| 38.16047
| 0.774287
| 0.051795
| 0
| 0.48227
| 0
| 0.01182
| 0.345304
| 0.025683
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.009456
| 0.016548
| null | null | 0.016548
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c3522929deb4bb2524b97c1af2b5f08df9a050e
| 5,585
|
py
|
Python
|
backend/0_publish_audio.py
|
bmj-hackathon/ethberlinzwei-babelfish_3_0
|
e986ad1b9fa896f20d7cdd296d130d804f55ecfa
|
[
"Apache-2.0"
] | 1
|
2019-08-28T12:12:09.000Z
|
2019-08-28T12:12:09.000Z
|
backend/0_publish_audio.py
|
bmj-hackathon/ethberlinzwei-babelfish_3_0
|
e986ad1b9fa896f20d7cdd296d130d804f55ecfa
|
[
"Apache-2.0"
] | 8
|
2020-09-07T01:00:44.000Z
|
2022-03-02T05:19:32.000Z
|
backend/0_publish_audio.py
|
bmj-hackathon/ethberlinzwei-babelfish_3_0
|
e986ad1b9fa896f20d7cdd296d130d804f55ecfa
|
[
"Apache-2.0"
] | 3
|
2019-08-24T20:36:08.000Z
|
2021-02-18T20:28:11.000Z
|
import sys
import logging
# loggers_dict = logging.Logger.manager.loggerDict
#
# logger = logging.getLogger()
# logger.handlers = []
#
# # Set level
# logger.setLevel(logging.DEBUG)
#
# # FORMAT = "%(asctime)s - %(levelno)s - %(module)-15s - %(funcName)-15s - %(message)s"
# # FORMAT = "%(asctime)s %(levelno)s: %(module)30s %(message)s"
# FORMAT = "%(levelno)s - %(module)-15s - %(funcName)-15s - %(message)s"
#
# DATE_FMT = "%Y-%m-%d %H:%M:%S"
# DATE_FMT = "%Y-%m-%d %H:%M:%S"
# formatter = logging.Formatter(FORMAT, DATE_FMT)
#
# # Create handler and assign
# handler = logging.StreamHandler(sys.stderr)
# handler.setFormatter(formatter)
# logger.handlers = [handler]
# logger.debug("Logging started")
#%%
# Standard imports
import os
from pathlib import Path
import json
from time import sleep
# Ocean imports
import squid_py
from squid_py.ocean.ocean import Ocean
from squid_py.config import Config
from pprint import pprint
import mantaray_utilities as manta_utils
from mantaray_utilities.user import password_map
#%% CONFIG
OCEAN_CONFIG_PATH = Path().cwd() / 'config_nile.ini'
assert OCEAN_CONFIG_PATH.exists(), "{} - path does not exist".format(OCEAN_CONFIG_PATH)
os.environ['OCEAN_CONFIG_PATH'] = str(OCEAN_CONFIG_PATH)
PASSWORD_PATH=Path().cwd() / ".nile_passwords"
assert PASSWORD_PATH.exists()
os.environ["PASSWORD_PATH"] = str(PASSWORD_PATH)
MARKET_PLACE_PROVIDER_ADDRESS="0x376817c638d2a04f475a73af37f7b51a2862d567"
os.environ["MARKET_PLACE_PROVIDER_ADDRESS"] = MARKET_PLACE_PROVIDER_ADDRESS
JSON_TEMPLATE = Path().cwd() / 'metadata_template.json'
assert JSON_TEMPLATE.exists()
#%% ARGPARSE
import argparse
parser = argparse.ArgumentParser(description='Publish audio')
parser.add_argument('--url', type=str, help='URL for input audio file')
parser.add_argument('--price', type=int, help='Selling price in Ocean token')
parser.add_argument('--reward', type=int, help='Reward offered in Ocean token')
parser.add_argument('--number-nodes', type=int, help='Number of processor nodes requested')
args = parser.parse_args()
logging.info("************************************************************".format())
logging.info("*** ETHBERLINZWEI HACKATHON ***".format())
logging.info("*** SPEECH2TEXT ***".format())
logging.info("*** STEP 1 - CLIENT REGISTERS A CLIP INTO OCEAN PROTOCOL ***".format())
logging.info("************************************************************".format())
logging.info("".format())
logging.info("(Step 1.1 not implemented - upload audio file from client to storage)".format())
logging.info("Publishing Audio to NILE network: {}".format(args.url))
logging.info("Will set price to {} OCEAN".format(args.price))
logging.info("Offering {} OCEAN reward".format(args.reward))
logging.info("Requesting {} processors".format(args.number_nodes))
logging.info("".format())
#%%
# Get the configuration file path for this environment
logging.info("Configuration file selected: {}".format(OCEAN_CONFIG_PATH))
# logging.critical("Deployment type: {}".format(manta_utils.config.get_deployment_type()))
logging.info("Squid API version: {}".format(squid_py.__version__))
#%%
# Instantiate Ocean with the default configuration file.
configuration = Config(OCEAN_CONFIG_PATH)
squid_py.ConfigProvider.set_config(configuration)
ocn = Ocean(configuration)
#%%
# Get a publisher account
publisher_acct = manta_utils.user.get_account_by_index(ocn,0)
#%%
logging.info("Publisher account address: {}".format(publisher_acct.address))
logging.info("Publisher account Testnet 'ETH' balance: {:>6.1f}".format(ocn.accounts.balance(publisher_acct).eth/10**18))
logging.info("Publisher account Testnet Ocean balance: {:>6.1f}".format(ocn.accounts.balance(publisher_acct).ocn/10**18))
def publish(url, price, reward, number_nodes):
# metadata = squid_py.ddo.metadata.Metadata.get_example()
# print('Name of asset:', metadata['base']['name'])
with open(JSON_TEMPLATE, 'r') as f:
metadata = json.load(f)
metadata['base']['files'][0]['url'] = url
metadata['base']['price'] = str(price)
metadata['additionalInformation']['reward'] = str(reward)
metadata['additionalInformation']['numberNodes'] = str(number_nodes)
ddo = ocn.assets.create(metadata, publisher_acct)
registered_did = ddo.did
logging.info("New asset registered at {}".format(str(registered_did)))
logging.info("Asset name: {}".format(metadata['base']['name']))
logging.info("Encrypted files to secret store, cipher text: [{}...] . ".format(ddo.metadata['base']['encryptedFiles'][:50]))
return registered_did
registered_did = publish(args.url, args.price, args.reward, args.number_nodes)
#TODO: Better handling based on receipt
print("Wait for the transaction to complete!")
sleep(10)
# %%
ddo = ocn.assets.resolve(registered_did)
# print("Asset '{}' resolved from Aquarius metadata storage: {}".format(ddo.did,ddo.metadata['base']['name']))
# %% [markdown]
# Similarly, we can verify that this asset is registered into the blockchain, and that you are the owner.
# %%
# We need the pure ID string as in the DID registry (a DID without the prefixes)
asset_id = squid_py.did.did_to_id(registered_did)
owner = ocn._keeper.did_registry.contract_concise.getDIDOwner(asset_id)
# print("Asset ID", asset_id, "owned by", owner)
assert str.lower(owner) == str.lower(publisher_acct.address)
logging.info("".format())
logging.info("Successfully registered Audio!".format())
logging.info("Asset Owner: {}".format(owner))
logging.info("Asset DID: {}".format(registered_did))
| 36.986755
| 128
| 0.708684
| 721
| 5,585
| 5.363384
| 0.306519
| 0.06827
| 0.039566
| 0.024825
| 0.138092
| 0.088699
| 0.049651
| 0.049651
| 0.03129
| 0
| 0
| 0.012566
| 0.116562
| 5,585
| 150
| 129
| 37.233333
| 0.77118
| 0.253715
| 0
| 0.067568
| 0
| 0
| 0.315175
| 0.062014
| 0
| 0
| 0.010214
| 0.006667
| 0.054054
| 1
| 0.013514
| false
| 0.054054
| 0.175676
| 0
| 0.202703
| 0.027027
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
7c359f84b8ac8bafab4c67c76d69bd091361babb
| 3,613
|
py
|
Python
|
nexpose/nexpose_vulnerabilityexception.py
|
Patralos/nexpose-client-python
|
bec81da29883b1b004046e29a9e7f7a6686467c1
|
[
"BSD-3-Clause"
] | 29
|
2017-06-27T04:44:03.000Z
|
2021-11-29T15:04:00.000Z
|
nexpose/nexpose_vulnerabilityexception.py
|
Patralos/nexpose-client-python
|
bec81da29883b1b004046e29a9e7f7a6686467c1
|
[
"BSD-3-Clause"
] | 40
|
2017-06-21T18:00:49.000Z
|
2018-06-06T21:13:34.000Z
|
nexpose/nexpose_vulnerabilityexception.py
|
Patralos/nexpose-client-python
|
bec81da29883b1b004046e29a9e7f7a6686467c1
|
[
"BSD-3-Clause"
] | 23
|
2017-07-18T16:40:57.000Z
|
2021-01-26T09:58:53.000Z
|
# Future Imports for py2/3 backwards compat.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from builtins import object
from .xml_utils import get_attribute, get_content_of
from future import standard_library
standard_library.install_aliases()
def fix_null(data):
if data == 'null':
return 0
return data
class VulnerabilityExceptionStatus(object):
UNDER_REVIEW = "Under Review"
APPROVED = "Approved"
REJECTED = "Rejected"
DELETED = "Deleted" # This state is also used for recalled exceptions!
class VulnerabilityExceptionReason(object):
FALSE_POSITIVE = "False Positive"
COMPENSATING_CONTROL = "Compensating Control"
ACCEPTABLE_USE = "Acceptable Use"
ACCEPTABLE_RISK = "Acceptable Risk"
OTHER = "Other"
class VulnerabilityExceptionScope(object):
ALL_INSTANCES = "All Instances"
ALL_INSTANCES_SPECIFIC_ASSET = "All Instances on a Specific Asset"
ALL_INSTANCES_SPECIFIC_SITE = "All Instances on a Specific Site"
SPECIFIC_INSTANCE_SPECIFIC_ASSET = "Specific Instance of Specific Asset"
class SiloVulnerabilityExceptionDetails(object):
@staticmethod
def CreateFromXML(xml_data):
details = SiloVulnerabilityExceptionDetails()
details.silo_id = get_attribute(xml_data, 'siloId', details.silo_id)
details.oldest_exception_creation_date = get_attribute(xml_data, 'oldestExceptionCreationDate', details.oldest_exception_creation_date) # TODO: date object
details.pending_exception_count = get_attribute(xml_data, 'pendingVulnExceptionsCount', details.pending_exception_count)
return details
def __init__(self):
self.silo_id = ''
self.oldest_exception_creation_date = 'N/A' # TODO: date object
self.pending_exception_count = 0
class VulnerabilityException(object):
@staticmethod
def CreateFromXML(xml_data):
details = VulnerabilityException()
details.id = int(get_attribute(xml_data, 'exception-id', details.id))
details.vulnerability_id = get_attribute(xml_data, 'vuln-id', details.vulnerability_id)
details.vulnerability_key = get_attribute(xml_data, 'vuln-key', details.vulnerability_key)
details.expiration_date = get_attribute(xml_data, 'expiration-date', details.expiration_date) # TODO: date object
details.submitter = get_attribute(xml_data, 'submitter', details.submitter)
details.submitter_comment = get_content_of(xml_data, 'submitter-comment', details.submitter_comment)
details.reviewer = get_attribute(xml_data, 'reviewer', details.reviewer)
details.reviewer_comment = get_content_of(xml_data, 'reviewer-comment', details.reviewer_comment)
details.status = get_attribute(xml_data, 'status', details.status)
details.reason = get_attribute(xml_data, 'reason', details.reason)
details.scope = get_attribute(xml_data, 'scope', details.scope)
details.asset_id = int(fix_null(get_attribute(xml_data, 'device-id', details.asset_id)))
details.asset_port = int(fix_null(get_attribute(xml_data, 'port-no', details.asset_port)))
return details
def __init__(self):
self.id = 0
self.vulnerability_id = ''
self.vulnerability_key = ''
self.expiration_date = '' # TODO: date object
self.submitter = ''
self.submitter_comment = ''
self.reviewer = ''
self.reviewer_comment = ''
self.status = ''
self.reason = ''
self.scope = ''
self.asset_id = 0
self.asset_port = 0
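# Hedged usage sketch (not part of the module): assuming xml_utils.get_attribute
# reads attributes off an ElementTree-style node and falls back to the supplied
# default, an exception record could be hydrated from an API fragment like so.
#
#     from xml.etree.ElementTree import fromstring
#
#     node = fromstring('<VulnerabilityException exception-id="12" '
#                       'vuln-id="ssl-weak" status="Approved" '
#                       'reason="Acceptable Risk" scope="All Instances" '
#                       'device-id="null" port-no="null"/>')
#     exc = VulnerabilityException.CreateFromXML(node)
#     # exc.id == 12; fix_null turns the "null" device-id/port-no into 0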
| 42.011628
| 164
| 0.715749
| 408
| 3,613
| 6.056373
| 0.245098
| 0.050992
| 0.084986
| 0.107649
| 0.231081
| 0.10603
| 0.062323
| 0
| 0
| 0
| 0
| 0.002406
| 0.194575
| 3,613
| 85
| 165
| 42.505882
| 0.846735
| 0.045115
| 0
| 0.114286
| 0
| 0
| 0.118211
| 0.015394
| 0
| 0
| 0
| 0.011765
| 0
| 1
| 0.071429
| false
| 0
| 0.057143
| 0
| 0.442857
| 0.014286
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c3d1d7e925f2c1752e9865895938aea4dee29d9
| 6,830
|
py
|
Python
|
guardian/decorators.py
|
peopledoc/django-guardian
|
459827c2329975113cbf0d11f4fd476b5689a055
|
[
"BSD-2-Clause"
] | null | null | null |
guardian/decorators.py
|
peopledoc/django-guardian
|
459827c2329975113cbf0d11f4fd476b5689a055
|
[
"BSD-2-Clause"
] | null | null | null |
guardian/decorators.py
|
peopledoc/django-guardian
|
459827c2329975113cbf0d11f4fd476b5689a055
|
[
"BSD-2-Clause"
] | null | null | null |
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseForbidden, HttpResponseRedirect
from django.utils.functional import wraps
from django.utils.http import urlquote
from django.db.models import Model, get_model
from django.db.models.base import ModelBase
from django.db.models.query import QuerySet
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext, TemplateDoesNotExist
from guardian.conf import settings as guardian_settings
from guardian.exceptions import GuardianError
def permission_required(perm, lookup_variables=None, **kwargs):
"""
Decorator for views that checks whether a user has a particular permission
enabled.
    Optionally, instances for which the check should be made may be passed as a
    second argument, or as tuple parameters similar to those passed to
    ``get_object_or_404``, but they must be provided as pairs of strings.
:param login_url: if denied, user would be redirected to location set by
this parameter. Defaults to ``django.conf.settings.LOGIN_URL``.
:param redirect_field_name: name of the parameter passed if redirected.
Defaults to ``django.contrib.auth.REDIRECT_FIELD_NAME``.
:param return_403: if set to ``True`` then instead of redirecting to the
login page, response with status code 403 is returned (
``django.http.HttpResponseForbidden`` instance or rendered template -
see :setting:`GUARDIAN_RENDER_403`). Defaults to ``False``.
:param accept_global_perms: if set to ``True``, then *object level
permission* would be required **only if user does NOT have global
permission** for target *model*. If turned on, makes this decorator
like an extension over standard
``django.contrib.admin.decorators.permission_required`` as it would
check for global permissions first. Defaults to ``False``.
Examples::
@permission_required('auth.change_user', return_403=True)
def my_view(request):
return HttpResponse('Hello')
@permission_required('auth.change_user', (User, 'username', 'username'))
def my_view(request, username):
user = get_object_or_404(User, username=username)
return user.get_absolute_url()
@permission_required('auth.change_user',
(User, 'username', 'username', 'groups__name', 'group_name'))
def my_view(request, username, group_name):
user = get_object_or_404(User, username=username,
group__name=group_name)
return user.get_absolute_url()
"""
login_url = kwargs.pop('login_url', settings.LOGIN_URL)
redirect_field_name = kwargs.pop('redirect_field_name', REDIRECT_FIELD_NAME)
return_403 = kwargs.pop('return_403', False)
accept_global_perms = kwargs.pop('accept_global_perms', False)
# Check if perm is given as string in order not to decorate
# view function itself which makes debugging harder
if not isinstance(perm, basestring):
raise GuardianError("First argument must be in format: "
"'app_label.codename or a callable which return similar string'")
def decorator(view_func):
def _wrapped_view(request, *args, **kwargs):
# if more than one parameter is passed to the decorator we try to
# fetch object for which check would be made
obj = None
if lookup_variables:
model, lookups = lookup_variables[0], lookup_variables[1:]
# Parse model
if isinstance(model, basestring):
splitted = model.split('.')
if len(splitted) != 2:
raise GuardianError("If model should be looked up from "
"string it needs format: 'app_label.ModelClass'")
model = get_model(*splitted)
elif type(model) in (Model, ModelBase, QuerySet):
pass
else:
raise GuardianError("First lookup argument must always be "
"a model, string pointing at app/model or queryset. "
"Given: %s (type: %s)" % (model, type(model)))
# Parse lookups
if len(lookups) % 2 != 0:
raise GuardianError("Lookup variables must be provided "
"as pairs of lookup_string and view_arg")
lookup_dict = {}
for lookup, view_arg in zip(lookups[::2], lookups[1::2]):
if view_arg not in kwargs:
raise GuardianError("Argument %s was not passed "
"into view function" % view_arg)
lookup_dict[lookup] = kwargs[view_arg]
obj = get_object_or_404(model, **lookup_dict)
# Handles both original and with object provided permission check
# as ``obj`` defaults to None
has_perm = accept_global_perms and request.user.has_perm(perm)
if not has_perm and not request.user.has_perm(perm, obj):
if return_403:
if guardian_settings.RENDER_403:
try:
response = render_to_response(
guardian_settings.TEMPLATE_403, {},
RequestContext(request))
response.status_code = 403
return response
except TemplateDoesNotExist, e:
if settings.DEBUG:
raise e
elif guardian_settings.RAISE_403:
raise PermissionDenied
return HttpResponseForbidden()
else:
path = urlquote(request.get_full_path())
tup = login_url, redirect_field_name, path
return HttpResponseRedirect("%s?%s=%s" % tup)
return view_func(request, *args, **kwargs)
return wraps(view_func)(_wrapped_view)
return decorator
def permission_required_or_403(perm, *args, **kwargs):
"""
Simple wrapper for permission_required decorator.
    Standard Django's permission_required decorator redirects the user to the
    login page when the permission check fails. This decorator may be used to
    return HttpResponseForbidden (status 403) instead of redirecting.
    The only difference from the ``permission_required`` decorator is that this
    one always sets the ``return_403`` parameter to ``True``.
"""
kwargs['return_403'] = True
return permission_required(perm, *args, **kwargs)
| 47.762238
| 80
| 0.630893
| 796
| 6,830
| 5.258794
| 0.273869
| 0.026278
| 0.028428
| 0.016722
| 0.11419
| 0.053989
| 0.043
| 0.043
| 0
| 0
| 0
| 0.014131
| 0.295461
| 6,830
| 142
| 81
| 48.098592
| 0.855777
| 0.048609
| 0
| 0.027027
| 0
| 0
| 0.116797
| 0.005387
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.027027
| 0.175676
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c46086ba91c653227726b101b253bd36be2a7f4
| 5,963
|
py
|
Python
|
boolean2/tokenizer.py
|
AbrahmAB/booleannet
|
a07124047d18a5b7265e050a234969ac58970c7a
|
[
"MIT"
] | null | null | null |
boolean2/tokenizer.py
|
AbrahmAB/booleannet
|
a07124047d18a5b7265e050a234969ac58970c7a
|
[
"MIT"
] | null | null | null |
boolean2/tokenizer.py
|
AbrahmAB/booleannet
|
a07124047d18a5b7265e050a234969ac58970c7a
|
[
"MIT"
] | null | null | null |
"""
Main tokenizer.
"""
from itertools import chain
import sys, random
import util
import ply.lex as lex
class Lexer:
"""
Lexer for boolean rules
"""
literals = '=*,'
tokens = (
'LABEL', 'ID','STATE', 'ASSIGN', 'EQUAL',
'AND', 'OR', 'NOT',
'NUMBER', 'LPAREN','RPAREN', 'COMMA',
)
reserved = {
'and' : 'AND',
'or' : 'OR',
'not' : 'NOT',
'True' : 'STATE',
'False' : 'STATE',
'Random' : 'STATE',
}
def __init__(self, **kwargs):
# nothing here yet
self.lexer = lex.lex(object=self, **kwargs)
def t_ID( self, t):
"[a-zA-Z_\+\-][a-zA-Z_0-9\+\-]*"
# check for reserved words
t.type = self.reserved.get( t.value, 'ID')
return t
def t_LABEL (self, t):
"[0-9][0-9]*:"
t.value = int(t.value[:-1])
return t
def t_NUMBER(self, t):
"[\+-]*\d+\.?\d*"
try:
t.value = float(t.value)
except ValueError:
util.error( "value too large", t.value )
return t
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_ASSIGN = r'\*'
t_EQUAL = r'='
t_COMMA = r','
t_ignore = ' \t'
t_ignore_COMMENT = r'\#.*'
    def t_newline(self, t):
        r'\n+'
        # newline handling; PLY reads the token regex from the docstring
        t.lexer.lineno += t.value.count("\n")
def t_error(self, t):
"Error message"
msg = "lexer error in '%s' at '%s'" % (self.last, t.value)
util.error( msg )
def tokenize_line(self, line ):
"Runs the lexer a single line retutns a list of tokens"
tokens = []
self.last = line
self.lexer.input( line )
while 1:
t = self.lexer.token()
if t:
tokens.append(t)
else:
break
return tokens
def tokenize_text(self, text):
"Runs the lexer on text and returns a list of lists of tokens"
return map( self.tokenize_line, util.split(text) )
def init_tokens( tokenlist ):
"""
    Returns elements of the list that are initializers
"""
def cond( elem ):
return elem[1].type == 'EQUAL'
return filter( cond, tokenlist)
def label_tokens( tokenlist ):
"""
Returns elements where the first token is a LABEL
(updating rules with labels)
"""
def cond( elem ):
return elem[0].type == 'LABEL'
return filter( cond, tokenlist)
def async_tokens( tokenlist ):
"""
Returns elements where the second token is ASSIGN
(updating rules with no LABELs)
"""
def cond( elem ):
return elem[1].type == 'ASSIGN'
return filter( cond, tokenlist)
def update_tokens( tokenlist ):
"""
Returns tokens that perform updates
"""
def cond( elem ):
return elem[1].type == 'ASSIGN' or elem[2].type == 'ASSIGN'
return filter( cond, tokenlist)
def get_nodes( tokenlist ):
"""
Flattens the list of tokenlist and returns the value of all ID tokens
"""
def cond ( token ):
return token.type == 'ID'
def get( token):
return token.value
nodes = map(get, filter( cond, chain( *tokenlist )))
nodes = set(nodes)
util.check_case( nodes )
return nodes
def tok2line( tokens ):
"""
Turns a list of tokens into a line that can be parsed again
"""
elems = [ str(t.value) for t in tokens ]
if tokens[0].type == 'LABEL':
elems[0] = elems[0] + ':'
return ' '.join( elems )
def test():
"""
Main test runner
>>> import util
>>>
>>> text = '''
... A = B = True
... 1: A* = B
... 2: B* = A and B
... C* = not C
... E = False
... F = (1, 2, 3)
... '''
>>>
>>> lexer = Lexer()
>>> tokens = lexer.tokenize_text( text )
>>> tokens[0]
[LexToken(ID,'A',1,0), LexToken(EQUAL,'=',1,2), LexToken(ID,'B',1,4), LexToken(EQUAL,'=',1,6), LexToken(STATE,'True',1,8)]
>>> tokens[1]
[LexToken(LABEL,1,1,0), LexToken(ID,'A',1,3), LexToken(ASSIGN,'*',1,4), LexToken(EQUAL,'=',1,6), LexToken(ID,'B',1,8)]
>>> tokens[2]
[LexToken(LABEL,2,1,0), LexToken(ID,'B',1,3), LexToken(ASSIGN,'*',1,4), LexToken(EQUAL,'=',1,6), LexToken(ID,'A',1,8), LexToken(AND,'and',1,10), LexToken(ID,'B',1,14)]
>>> tokens[3]
[LexToken(ID,'C',1,0), LexToken(ASSIGN,'*',1,1), LexToken(EQUAL,'=',1,3), LexToken(NOT,'not',1,5), LexToken(ID,'C',1,9)]
>>>
>>> get_nodes( tokens )
set(['A', 'C', 'B', 'E', 'F'])
"""
# runs the local suite
import doctest
doctest.testmod( optionflags=doctest.ELLIPSIS + doctest.NORMALIZE_WHITESPACE )
def tokenize( text ):
"A one step tokenizer"
lexer = Lexer()
return lexer.tokenize_text( text )
def modify_states( text, turnon=[], turnoff=[] ):
"""
Turns nodes on and off and comments out lines
that contain an assignment to any of those nodes.
Uses the main lexer.
"""
turnon = util.as_set( turnon )
turnoff = util.as_set( turnoff )
tokens = tokenize( text )
init = init_tokens( tokens )
init_lines = map(tok2line, init)
# override the initial values
init_lines.extend( [ '%s=True' % node for node in turnon ] )
init_lines.extend( [ '%s=False' % node for node in turnoff ] )
alter = turnon | turnoff
update = update_tokens ( tokens )
update_lines = []
for token in update:
line = tok2line( token)
if token[0].value in alter or token[1].value in alter:
line = '#' + line
update_lines.append( line )
all = init_lines + update_lines
return '\n'.join( all )
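# Hedged example (assumed minimal input): for the text "1: A* = B" with
# turnon=['A'], modify_states emits the override line "A=True" and comments
# out the now-conflicting update rule, yielding "A=True\n#1: A * = B".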
if __name__ == '__main__':
test()
lexer = Lexer()
text = """
A = B = C = False
D = True
1: A* = B
2: B* = A and B
C* = not C
D* = A
"""
print modify_states( text, turnon=['A', 'B'], turnoff=['C'] )
| 25.374468
| 171
| 0.528928
| 777
| 5,963
| 3.990991
| 0.234234
| 0.017414
| 0.022573
| 0.021928
| 0.159626
| 0.133505
| 0.098355
| 0.060626
| 0.039987
| 0.039987
| 0
| 0.019417
| 0.309073
| 5,963
| 235
| 172
| 25.374468
| 0.733252
| 0.015093
| 0
| 0.097744
| 0
| 0
| 0.139391
| 0.007136
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.037594
| null | null | 0.007519
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c478777d84107b3217342ab649b11b3244e8389
| 7,606
|
py
|
Python
|
pyec/distribution/bayes/structure/basic.py
|
hypernicon/pyec
|
7072835c97d476fc45ffc3b34f5c3ec607988e6d
|
[
"MIT"
] | 2
|
2015-03-16T21:18:27.000Z
|
2017-10-09T19:59:24.000Z
|
pyec/distribution/bayes/structure/basic.py
|
hypernicon/pyec
|
7072835c97d476fc45ffc3b34f5c3ec607988e6d
|
[
"MIT"
] | null | null | null |
pyec/distribution/bayes/structure/basic.py
|
hypernicon/pyec
|
7072835c97d476fc45ffc3b34f5c3ec607988e6d
|
[
"MIT"
] | null | null | null |
"""
Copyright (C) 2012 Alan J Lockett
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from numpy import *
import sys
import weakref
class CyclicException(Exception):
pass
class DuplicateEdgeException(Exception):
pass
class IrreversibleEdgeException(Exception):
pass
class StructureSearch(object):
def __init__(self, scorer, autocommit=False):
self.scorer = scorer
self.autocommit = autocommit
self.network = None
def canReverse(self, newChild, newParent):
"""
check to ensure reverse link is not already present
(In a DAG, it should not be)
"""
if newChild.parents.has_key(newParent.index):
return False
return True
def admissibleEdge(self, var1, var2):
"""Is edge admissible in a DAG?"""
if var1.index == var2.index:
return False
if var1.parents.has_key(var2.index):
return False
if var2.parents.has_key(var1.index):
return False
return True
def merge(self, net, other, data, allowCyclic=False):
"""add the edges from other to self, preventing cycles if asked"""
self.network = net
net.computeEdgeStatistics()
other.computeEdgeStatistics()
indexMap = dict([(v.index, v) for v in net.variables])
undoList = []
def undo(update=True):
for undo2 in reversed(undoList):
undo2(False)
for frm, to in other.edges:
try:
frm2 = indexMap[frm.index]
to2 = indexMap[to.index]
undo2 = self.addEdge(to2, frm2, data, allowCyclic)
frm2.children = None
undoList.append(undo2)
except Exception, msg:
pass
self.network = None
return undo
def cross(self, net, other, data, allowCyclic=False):
self.network = net
net.computeEdgeStatistics()
other.computeEdgeStatistics()
indexMap = dict([(v.index, v) for v in net.variables])
indexMap2 = dict([(v.index, v) for v in other.variables])
undoList = []
if len(net.edges) == 0: return other
if len(other.edges) == 0: return net
if len(net.edges) < net.numVariables / 2 and len(other.edges) < other.numVariables / 2:
return net
def undo(update=True):
for undo2 in reversed(undoList):
undo2(False)
for variable in net.variables:
# pick a parent
if random.random_sample() < 0.5:  # must call the function; comparing the function object is always False
# Add relationships from other, avoiding cycles
ps = len(variable.parents)
for idx, parent in variable.parents.iteritems():
undoList.append(self.removeEdge(idx, variable, allowCyclic))
parent.children = None
v2 = indexMap2[variable.index]  # counterpart of `variable` in `other`; was undefined in the original
for idx, parent2 in v2.parents.iteritems():
try:
parent = indexMap[parent2.index]
undoList.append(self.addEdge(variable, parent, data, allowCyclic))
parent.children = None
except Exception, msg:
pass
net.computeEdgeStatistics()
self.network = None
return undo
def removeEdge(self, i, variable, data=None):
self.network.computeEdgeStatistics()
oldstate = self.network.getComputedState()
toRemove = variable.parents[i]
variable.removeParent(toRemove)
toRemove.children = None
self.network.dirty = True
netref = weakref.ref(self.network)
varref = weakref.ref(variable)
remref = weakref.ref(toRemove)
def undo(update=True):
var = varref()
rem = remref()
net = netref()
if var is not None and rem is not None and net is not None:
var.addParent(rem)
rem.children = None
net.restoreComputedState(oldstate)
try:
self.network.updateVar(variable, data)
except:
undo()
raise
return undo
def addEdge(self, child, parent, data = None, allowCyclic = False):
self.network.computeEdgeStatistics()
oldstate = self.network.getComputedState()
if child.parents.has_key(parent.index):
raise DuplicateEdgeException, "Edge already exists"
child.addParent(parent)
parent.children = None
self.network.dirty = True
parentref = weakref.ref(parent)
childref = weakref.ref(child)
netref = weakref.ref(self.network)
def undo(update=True):
parent = parentref()
child = childref()
network = netref()
if parent is not None and child is not None and network is not None:
parent.children = None
child.removeParent(parent)
network.restoreComputedState(oldstate)
if (not allowCyclic) and not self.network.isAcyclic():
undo()
raise CyclicException, "Adding an edge makes network cyclic"
try:
self.network.updateVar(child, data)
except:
undo()
raise
return undo
def reverseEdge(self, i, variable, data=None, allowCyclic = False):
"""toReverse is new child, variable is new parent"""
self.network.computeEdgeStatistics()
oldstate = self.network.getComputedState()
toReverse = variable.parents[i]
if not self.canReverse(toReverse, variable):
raise IrreversibleEdgeException, "Edge reversal disallowed"
variable.removeParent(toReverse)
toReverse.addParent(variable)
variable.children = None
toReverse.children = None
self.network.dirty = True
varref = weakref.ref(variable)
revref = weakref.ref(toReverse)
netref = weakref.ref(self.network)
def undo(update=True):
variable = varref()
toReverse = revref()
network = netref()
if (variable is not None and
toReverse is not None and
network is not None):
variable.addParent(toReverse)
toReverse.removeParent(variable)
network.restoreComputedState(oldstate)
if (not allowCyclic) and not self.network.isAcyclic():
undo()
raise CyclicException, "Reversing an edge makes nework cyclic"
try:
self.network.updateVar(variable, data)
self.network.updateVar(toReverse, data)
except:
undo()
raise
return undo
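# Hedged usage sketch: each mutator returns an undo closure, so a structure
# search can apply a move tentatively, score it, and roll back; `search`,
# `child`, `parent` and the acceptance test are hypothetical names:
#
# undo = search.addEdge(child, parent, data)
# if new_score < old_score:  # hypothetical acceptance test
#     undo()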
def attempt(self, fn, exc):
try:
return fn()
except:
exc()
raise
| 35.050691
| 460
| 0.620431
| 858
| 7,606
| 5.48951
| 0.258741
| 0.053716
| 0.017197
| 0.015287
| 0.310403
| 0.278981
| 0.206794
| 0.140127
| 0.128238
| 0.109554
| 0
| 0.006008
| 0.299763
| 7,606
| 217
| 461
| 35.050691
| 0.878333
| 0.007757
| 0
| 0.511905
| 0
| 0
| 0.018501
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.029762
| 0.017857
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c486bb219145330aa050e526b0e111823623d51
| 620
|
py
|
Python
|
projects/code_combat/8_Cloudrip_Mountain/471-Distracting_Dungeon/distracting_dungeon.py
|
only-romano/junkyard
|
b60a25b2643f429cdafee438d20f9966178d6f36
|
[
"MIT"
] | null | null | null |
projects/code_combat/8_Cloudrip_Mountain/471-Distracting_Dungeon/distracting_dungeon.py
|
only-romano/junkyard
|
b60a25b2643f429cdafee438d20f9966178d6f36
|
[
"MIT"
] | null | null | null |
projects/code_combat/8_Cloudrip_Mountain/471-Distracting_Dungeon/distracting_dungeon.py
|
only-romano/junkyard
|
b60a25b2643f429cdafee438d20f9966178d6f36
|
[
"MIT"
] | null | null | null |
def moveBothTo(point):
while hero.distanceTo(point) > 1:
hero.move(point)
hero.command(peasant, "move", point)
peasant = hero.findNearest(hero.findFriends())
while True:
hero.command(peasant, "buildXY", "decoy", peasant.pos.x + 2, peasant.pos.y)
nextPoint = {"x": hero.pos.x, "y": hero.pos.y + 28}
moveBothTo(nextPoint)
nextPoint = {"x": hero.pos.x + 28, "y": hero.pos.y}
enemy = hero.findNearestEnemy()
while enemy:
while enemy.health > 0:
hero.attack(enemy)
enemy = hero.findNearestEnemy()
moveBothTo(nextPoint)
| 31
| 80
| 0.606452
| 76
| 620
| 4.947368
| 0.355263
| 0.074468
| 0.095745
| 0.090426
| 0.095745
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014989
| 0.246774
| 620
| 19
| 81
| 32.631579
| 0.79015
| 0
| 0
| 0.125
| 0
| 0
| 0.033278
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c48ecfa52411dc6356f3fa1289a95505f086e55
| 2,599
|
py
|
Python
|
issues/migrations/0001_initial.py
|
QizaiMing/ergo-project-manager
|
2b02b2ab6d9e48bfccbbca8c05180b07177dcb77
|
[
"MIT"
] | null | null | null |
issues/migrations/0001_initial.py
|
QizaiMing/ergo-project-manager
|
2b02b2ab6d9e48bfccbbca8c05180b07177dcb77
|
[
"MIT"
] | 3
|
2020-11-01T22:08:38.000Z
|
2022-03-12T00:49:00.000Z
|
issues/migrations/0001_initial.py
|
QizaiMing/ergo-project-manager
|
2b02b2ab6d9e48bfccbbca8c05180b07177dcb77
|
[
"MIT"
] | 2
|
2021-01-03T07:17:16.000Z
|
2021-05-29T17:27:11.000Z
|
# Generated by Django 2.2.12 on 2020-05-01 03:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Issue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('description', models.TextField(max_length=2000)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('status', models.CharField(choices=[('To Do', 'To Do'), ('In Progress', 'In Progress'), ('Done', 'Done')], default='To Do', max_length=20)),
('priority', models.CharField(choices=[('Low', 'Low'), ('Medium', 'Medium'), ('High', 'High')], default='Low', max_length=20)),
('assignee', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='assigned', to=settings.AUTH_USER_MODEL)),
('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='issues', to=settings.AUTH_USER_MODEL)),
('linked_to', models.ManyToManyField(related_name='_issue_linked_to_+', to='issues.Issue')),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('content', models.TextField(max_length=1000)),
('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to=settings.AUTH_USER_MODEL)),
('issue', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='issues.Issue')),
],
),
migrations.CreateModel(
name='Attachment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.FileField(upload_to='media/files')),
('issue', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='attachments', to='issues.Issue')),
],
),
]
| 50.960784
| 157
| 0.614852
| 278
| 2,599
| 5.586331
| 0.320144
| 0.036059
| 0.054089
| 0.084997
| 0.529298
| 0.486156
| 0.438506
| 0.386993
| 0.386993
| 0.386993
| 0
| 0.015516
| 0.231243
| 2,599
| 50
| 158
| 51.98
| 0.761762
| 0.017699
| 0
| 0.395349
| 1
| 0
| 0.124657
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.069767
| 0
| 0.162791
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c4a4e05ac30862172f332ac22daa59c8c1ecce1
| 2,764
|
py
|
Python
|
com/binghe/hacker/tools/script/ak/check_virus.py
|
ffffff0x/python-hacker
|
a2dc7f9031669a86bd2c87892c0a8c1e54bb2a79
|
[
"Apache-2.0"
] | 52
|
2019-02-11T13:02:20.000Z
|
2022-02-06T07:43:55.000Z
|
com/binghe/hacker/tools/script/ak/check_virus.py
|
sunshinelyz/python-hacker
|
a2dc7f9031669a86bd2c87892c0a8c1e54bb2a79
|
[
"Apache-2.0"
] | null | null | null |
com/binghe/hacker/tools/script/ak/check_virus.py
|
sunshinelyz/python-hacker
|
a2dc7f9031669a86bd2c87892c0a8c1e54bb2a79
|
[
"Apache-2.0"
] | 15
|
2019-02-25T03:04:50.000Z
|
2021-10-19T02:13:52.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- coding: gbk -*-
# Date: 2019/2/22
# Created by Binghe (冰河)
# Description: submit the generated bindshell.exe to vscan.novirusthanks.org for detection
# Usage: python check_virus.py -f bindshell.exe
# Blog: https://blog.csdn.net/l1028386804
import re
import httplib
import time
import os
import optparse
from urlparse import urlparse
def printResults(url):
status = 200
host = urlparse(url)[1]
path = urlparse(url)[2]
if 'analysis' not in path:
while status != 302:
conn = httplib.HTTPConnection(host)
conn.request('GET', path)
resp = conn.getresponse()
status = resp.status
print '[+] Scanning file...'
conn.close()
time.sleep(15)
print '[+] Scan Complete.'
path = path.replace('file', 'analysis')
conn = httplib.HTTPConnection(host)
conn.request('GET', path)
resp = conn.getresponse()
data = resp.read()
conn.close()
reResults = re.findall(r'Detection rate:.*\)', data)
htmlStripRes = reResults[1]. \
replace('<font color=\'red\'>', ''). \
replace('</font>', '')
print '[+] ' + str(htmlStripRes)
def uploadFile(fileName):
print "[+] Uploading file to NoVirusThanks..."
fileContents = open(fileName, 'rb').read()
header = {'Content-Type': 'multipart/form-data; \
boundary=----WebKitFormBoundaryF17rwCZdGuPNPT9U'}
params = "------WebKitFormBoundaryF17rwCZdGuPNPT9U"
params += "\r\nContent-Disposition: form-data; " + \
"name=\"upfile\"; filename=\"" + str(fileName) + "\""
params += "\r\nContent-Type: " + \
"application/octet stream\r\n\r\n"
params += fileContents
params += "\r\n------WebKitFormBoundaryF17rwCZdGuPNPT9U"
params += "\r\nContent-Disposition: form-data; " + \
"name=\"submitfile\"\r\n"
params += "\r\nSubmit File\r\n"
params += "------WebKitFormBoundaryF17rwCZdGuPNPT9U--\r\n"
conn = httplib.HTTPConnection('vscan.novirusthanks.org')
conn.request("POST", "/", params, header)
response = conn.getresponse()
location = response.getheader('location')
conn.close()
return location
def main():
parser = optparse.OptionParser('usage %prog -f <filename>')
parser.add_option('-f', dest='fileName', type='string', \
help='specify filename')
(options, args) = parser.parse_args()
fileName = options.fileName
if fileName == None:
print parser.usage
exit(0)
elif os.path.isfile(fileName) == False:
print '[+] ' + fileName + ' does not exist.'
exit(0)
else:
loc = uploadFile(fileName)
printResults(loc)
if __name__ == '__main__':
main()
| 29.72043
| 67
| 0.599132
| 291
| 2,764
| 5.652921
| 0.4811
| 0.007295
| 0.045593
| 0.035258
| 0.167781
| 0.167781
| 0.167781
| 0.167781
| 0.080243
| 0.080243
| 0
| 0.020457
| 0.239508
| 2,764
| 93
| 68
| 29.72043
| 0.762131
| 0.087192
| 0
| 0.185714
| 0
| 0
| 0.242846
| 0.079889
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.085714
| null | null | 0.114286
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c4bb1688cf1e8399ddcf1585b39fc36418f8801
| 827
|
py
|
Python
|
modules/gitbox/files/asfgit/hooks/sync.py
|
Humbedooh/infrastructure-puppet
|
a85f797d847b80e877cd5b7c66513970f6f80703
|
[
"Apache-2.0"
] | 1
|
2019-06-09T10:25:04.000Z
|
2019-06-09T10:25:04.000Z
|
modules/gitbox/files/asfgit/hooks/sync.py
|
Humbedooh/infrastructure-puppet
|
a85f797d847b80e877cd5b7c66513970f6f80703
|
[
"Apache-2.0"
] | 1
|
2020-05-08T07:07:43.000Z
|
2020-05-08T07:07:43.000Z
|
modules/gitbox/files/asfgit/hooks/sync.py
|
Humbedooh/infrastructure-puppet
|
a85f797d847b80e877cd5b7c66513970f6f80703
|
[
"Apache-2.0"
] | 1
|
2019-12-31T07:28:19.000Z
|
2019-12-31T07:28:19.000Z
|
#!/usr/local/bin/python
import json
import socket
import sys
import asfgit.cfg as cfg
import asfgit.git as git
import asfgit.log as log
import asfgit.util as util
import subprocess, os, time
def main():
ghurl = "git@github:apache/%s.git" % cfg.repo_name
os.chdir("/x1/repos/asf/%s.git" % cfg.repo_name)
try:
for ref in git.stream_refs(sys.stdin):
if ref.is_rewrite():
print("Syncing %s (FORCED)..." % ref.name)
subprocess.check_call(["git", "push", "-f", ghurl, "%s:%s" % (ref.newsha, ref.name)])
else:
print("Syncing %s..." % ref.name)
subprocess.check_call(["git", "push", ghurl, "%s:%s" % (ref.newsha, ref.name)])
except subprocess.CalledProcessError as err:
util.abort("Could not sync with GitHub: %s" % err.output)
| 30.62963
| 98
| 0.613059
| 120
| 827
| 4.175
| 0.475
| 0.095808
| 0.027944
| 0.043912
| 0.283433
| 0.223553
| 0.223553
| 0
| 0
| 0
| 0
| 0.001565
| 0.227328
| 827
| 26
| 99
| 31.807692
| 0.782473
| 0.026602
| 0
| 0
| 0
| 0
| 0.16812
| 0.029888
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.380952
| 0
| 0.428571
| 0.095238
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
7c4c2493de449c01316f5bf624115a0a13bde60f
| 9,598
|
py
|
Python
|
rosimport/_rosdef_loader.py
|
asmodehn/rosimport
|
c63e4769650b1cf19f23fbaa65a356ffae20a536
|
[
"MIT"
] | 5
|
2017-11-11T18:26:28.000Z
|
2019-06-12T08:47:58.000Z
|
rosimport/_rosdef_loader.py
|
asmodehn/rosimport
|
c63e4769650b1cf19f23fbaa65a356ffae20a536
|
[
"MIT"
] | 8
|
2017-06-30T08:28:46.000Z
|
2017-07-18T04:50:18.000Z
|
rosimport/_rosdef_loader.py
|
pyros-dev/rosimport
|
c63e4769650b1cf19f23fbaa65a356ffae20a536
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, division, print_function
import contextlib
import importlib
import site
import tempfile
import shutil
from rosimport import genrosmsg_py, genrossrv_py
"""
A module to setup custom importer for .msg and .srv files
Upon import, it will first find the .msg file, then generate the python module for it, then load it.
TODO...
"""
# We need to be extra careful with python versions
# Ref : https://docs.python.org/dev/library/importlib.html#importlib.import_module
# Ref : http://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
# Note : Couldn't find a way to make imp.load_source deal with packages or relative imports (necessary for our generated message classes)
import os
import sys
import logging
# Class to allow dynamic search of packages
class RosSearchPath(dict):
"""
Class to allow dynamic search of packages.
This is where we hook up into python import mechanism in order to generate and discover
packages and messages we are depending on.
But it should not be used during the generation of multiple messages in only one package,
as this is too tricky to get right, and too easy to break by mistake.
"""
def __init__(self, **ros_package_paths):
# we use the default ROS_PACKAGE_PATH if already setup in environment.
# This allows us to find message definitions in a ROS distro (and collaborate with pyros_setup)
package_paths = {}
for distropath in [d for d in os.environ.get('ROS_PACKAGE_PATH', '').split(':') if os.path.exists(d)]:
for p in [pkgd for pkgd in os.listdir(distropath) if os.path.exists(os.path.join(distropath, pkgd, 'msg'))]:
package_paths[p] = package_paths.get(p, set()) | {os.path.join(distropath, p, 'msg')}
# we add any extra path
package_paths.update(ros_package_paths)
super(RosSearchPath, self).__init__(package_paths)
def try_import(self, item):
try:
# we need to import the .msg submodule (only one usable as dependency)
mod = importlib.import_module(item + '.msg')
# import succeeded : we should get the namespace path
# and add it to the list of paths to avoid going through this all over again...
for p in mod.__path__:
# Note we want dependencies here. dependencies are ALWAYS '.msg' files in 'msg' directory.
msg_path = os.path.join(p)
# We add a path only if we can find the 'msg' directory
self[item] = self.get(item, set()) | ({msg_path} if os.path.exists(msg_path) else set())
return mod
except ImportError:
# import failed
return None
def __contains__(self, item):
""" True if D has a key k, else False. """
has = super(RosSearchPath, self).__contains__(item)
if not has: # attempt importing. solving ROS path setup problem with python import paths setup.
self.try_import(item)
# Note : if ROS is setup, rospkg.RosPack can find packages
# try again (might work now)
return super(RosSearchPath, self).__contains__(item)
def __getitem__(self, item):
""" x.__getitem__(y) <==> x[y] """
got = super(RosSearchPath, self).get(item)
if got is None:
# attempt discovery by relying on python core import feature.
self.try_import(item)
# Note : if ROS is setup, rospkg.RosPack can find packages
return super(RosSearchPath, self).get(item)
# singleton instance, to keep used ros package paths in cache
ros_import_search_path = RosSearchPath()
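# Hedged sketch: lookups double as discovery, so a plain membership test can
# trigger an import attempt ('std_msgs' is just an illustrative package):
#
# 'std_msgs' in ros_import_search_path  # True once std_msgs.msg is importable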
def RosLoader(rosdef_extension):
"""
Function generating ROS loaders.
This is used to keep .msg and .srv loaders very similar
"""
if rosdef_extension == '.msg':
loader_origin_subdir = 'msg'
loader_file_extension = rosdef_extension
loader_generated_subdir = 'msg'
loader_generator = genrosmsg_py
elif rosdef_extension == '.srv':
loader_origin_subdir = 'srv'
loader_file_extension = rosdef_extension
loader_generated_subdir = 'srv'
loader_generator = genrossrv_py
else:
raise RuntimeError("RosLoader for a format {0} other than .msg or .srv is not supported".format(rosdef_extension))
import filefinder2.machinery
class ROSDefLoader(filefinder2.machinery.SourceFileLoader):
"""
Python Loader for Rosdef files.
Note : We support ROS layout :
- msg/myMsg.msg
- srv/mySrv.srv
- my_pkg/__init__.py # doesn't really matter (we rely on PEP 420)
OR inside the python code:
- my_pkg/__init__.py # doesn't really matter (we rely on PEP 420)
- my_pkg/msg/myMsg.msg
- my_pkg/srv/mySrv.srv
BUT the following is also importable relatively,
which is especially useful for tests or intra-package ROS communication,
although it cannot be used as another package dependency (due to ROS limitations)
- my_pkg/__init__.py # doesn't really matter (we rely on PEP 420)
- my_pkg/subpkg/__init__.py # doesn't really matter (we rely on PEP 420)
- my_pkg/subpkg/msg/myMsg.msg
- my_pkg/subpkg/srv/mySrv.srv
In that case myMsg.py will also be generated under my_pkg.msg,
but can be imported relatively from my_pkg/subpkg/module.py with "from .msg import myMsg"
"""
rosimport_tempdir = os.path.join(tempfile.gettempdir(), 'rosimport')
def __init__(self, fullname, path):
self.logger = logging.getLogger(__name__)
# to normalize input
path = os.path.normpath(path)
# Doing this in each loader, in case we are running from different processes,
# avoiding to reload from same file (especially useful for boxed tests).
# But deterministic path to avoid regenerating from the same interpreter
rosimport_path = os.path.join(self.rosimport_tempdir, str(os.getpid()))
if not os.path.exists(rosimport_path):
os.makedirs(rosimport_path)
rospackage = fullname.partition('.')[0]
if os.path.isdir(path):
# if we get a package name ending with msg or srv and a non empty directory
if (
fullname.endswith(loader_origin_subdir) and
any([f.endswith(loader_file_extension) for f in os.listdir(path)])
):
# TODO : dynamic in memory generation (we do not need the file ultimately...)
outdir, gen_rosdef_pkgpath = loader_generator(
# generate message's python code at once, for this package level.
rosdef_files=[os.path.join(path, f) for f in os.listdir(path)],
package=fullname,
sitedir=rosimport_path,
search_path=ros_import_search_path,
)
# TODO : handle thrown exception (cleaner than hacking the search path dict...)
# try:
# generator.generate_messages(package, rosfiles, outdir, search_path)
# except genmsg.MsgNotFound as mnf:
# try:
# mod = importlib.import_module(mnf.package)
# # import succeeded : we should get the namespace path that has '/msg'
# # and add it to the list of paths to avoid going through this all over again...
# for p in mod.__path__:
# # Note we want dependencies here. dependencies are ALWAYS '.msg' files in 'msg' directory.
# msg_path = os.path.join(p, genmsg_MSG_DIR)
# # We add a path only if we can find the 'msg' directory
# search_path[mnf.package] = search_path[mnf.package] + ([msg_path] if os.path.exists(msg_path) else [])
# # Try generation again
# generator.generate_messages(package, rosfiles, outdir, search_path)
# except ImportError:
# # import failed
# return None
if not os.path.exists(gen_rosdef_pkgpath):
raise ImportError("{0} file not found".format(gen_rosdef_pkgpath))
# relying on usual source file loader since we have generated normal python code
super(ROSDefLoader, self).__init__(fullname, gen_rosdef_pkgpath)
def get_gen_path(self):
"""Returning the generated path matching the import"""
return self.path # TODO : maybe useless ?
# return os.path.join(self.outdir_pkg, loader_generated_subdir)
def __repr__(self):
return "ROSDefLoader/{0}({1}, {2})".format(loader_file_extension, self.name, self.path)
@staticmethod
def get_file_extension():
return loader_file_extension
@staticmethod
def get_origin_subdir():
return loader_origin_subdir
@staticmethod
def get_generated_subdir():
return loader_generated_subdir
return ROSDefLoader
ROSMsgLoader = RosLoader(rosdef_extension='.msg')
ROSSrvLoader = RosLoader(rosdef_extension='.srv')
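# Hedged usage sketch (assumes rosimport's import hooks are installed, e.g.
# through an activate()-style entry point, which is not defined in this
# module):
#
# import rosimport
# rosimport.activate()  # hypothetical setup call
# from std_msgs.msg import String  # resolved through ROSMsgLoader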
| 44.435185
| 137
| 0.620025
| 1,200
| 9,598
| 4.800833
| 0.276667
| 0.016664
| 0.013886
| 0.009721
| 0.255511
| 0.222184
| 0.20309
| 0.190939
| 0.157264
| 0.124631
| 0
| 0.003737
| 0.303084
| 9,598
| 215
| 138
| 44.64186
| 0.857527
| 0.443634
| 0
| 0.076923
| 0
| 0
| 0.036036
| 0.0043
| 0
| 0
| 0
| 0.009302
| 0
| 1
| 0.120879
| false
| 0
| 0.263736
| 0.043956
| 0.527473
| 0.010989
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|
7c5785c50891073f1d8d050a467303e1d02503f4
| 5,967
|
py
|
Python
|
fair/forcing/ozone_tr.py
|
znicholls/FAIR
|
599c44ed140b069968ba7d1ca99de40218e42545
|
[
"Apache-2.0"
] | 1
|
2020-11-14T16:09:39.000Z
|
2020-11-14T16:09:39.000Z
|
fair/forcing/ozone_tr.py
|
znicholls/FAIR
|
599c44ed140b069968ba7d1ca99de40218e42545
|
[
"Apache-2.0"
] | 1
|
2020-11-02T17:59:02.000Z
|
2020-11-02T17:59:02.000Z
|
fair/forcing/ozone_tr.py
|
znicholls/FAIR
|
599c44ed140b069968ba7d1ca99de40218e42545
|
[
"Apache-2.0"
] | 2
|
2020-11-02T16:42:05.000Z
|
2020-12-15T16:36:24.000Z
|
from __future__ import division
import numpy as np
from ..constants import molwt
def regress(emissions,
beta=np.array([2.8249e-4, 1.0695e-4, -9.3604e-4, 99.7831e-4])):
"""Calculates tropospheric ozone forcing from precursor emissions.
Inputs: (nt x 40) emissions array
Keywords:
beta: 4-element array of regression coefficients of precursor
radiative efficiency, W m-2 (Mt yr-1)-1.
order is [CH4, CO, NMVOC, NOx]
Outputs:
tropospheric ozone ERF time series.
"""
if emissions.ndim==2:
em_CH4, em_CO, em_NMVOC, em_NOx = emissions[:,[3, 6, 7, 8]].T
else:
em_CH4, em_CO, em_NMVOC, em_NOx = emissions[[3, 6, 7, 8]]
F_CH4 = beta[0] * em_CH4
F_CO = beta[1] * em_CO
F_NMVOC = beta[2] * em_NMVOC
F_NOx = beta[3] * em_NOx
F = F_CH4 + F_CO + F_NMVOC + F_NOx
return F
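# Hedged example (hypothetical emission values): one 40-species emissions row
# where indices 3, 6, 7, 8 hold CH4, CO, NMVOC and NOx.
#
# em = np.zeros(40)
# em[[3, 6, 7, 8]] = [300.0, 600.0, 150.0, 40.0]
# regress(em)  # scalar tropospheric ozone forcing, W m-2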
def cmip6_stevenson(emissions, C_CH4, T=0, feedback=False,
PI=np.array([722, 170, 10, 4.29]),
beta=np.array([1.77871043e-04, 5.80173377e-05, 2.09151270e-03,
1.94458719e-04])):
"""Calculates tropospheric ozone forcing from precursor emissions based on
Stevenson et al, 2013 10.5194/acp-13-3063-2013
Inputs:
emissions: (nt x 40) numpy array
C_CH4 : (nt) numpy array of methane concentrations, ppb
Keywords:
T : change in surface temperature since pre-industrial
feedback : True or False - include temperature feedback on ozone
forcing?
PI: : 4-element array of pre-industrial CH4 concentrations,
CO emissions, NMVOC emissions and NOx emissions
beta: : coefficients of how CH4 concentrations, CO emissions,
NMVOC emissions and NOx emissions affect forcing
Outputs:
tropospheric ozone ERF time series.
"""
# expand to 2D/1D if not already
if emissions.ndim == 1:
nspec = len(emissions)
emissions = emissions.reshape((1, nspec))
if np.isscalar(C_CH4):
C_CH4 = np.ones(1)*C_CH4
year, em_CO, em_NMVOC, em_NOx = emissions[:,[0, 6, 7, 8]].T
nt = len(year)
F_CH4, F_CO, F_NMVOC, F_NOx = np.zeros((4,nt))
for i in range(nt):
F_CH4[i] = beta[0] * (C_CH4[i]-PI[0])
F_CO[i] = beta[1] * (em_CO[i]-PI[1])
F_NMVOC[i] = beta[2] * (em_NMVOC[i]-PI[2])
F_NOx[i] = beta[3] * (em_NOx[i]-PI[3])
# Include the effect of climate feedback? We fit a curve to the 2000, 2030
# and 2100 best estimates of feedback based on middle-of-the-road
# temperature projections.
def temperature_feedback(T, a=0.03189267, b=1.34966941, c=-0.03214807):
if T<=0:
return 0
else:
return a*np.exp(-b*T)+c
if feedback:
F = F_CH4 + F_CO + F_NMVOC + F_NOx + temperature_feedback(T)
else:
F = F_CH4 + F_CO + F_NMVOC + F_NOx
return F
def stevenson(emissions, C_CH4, T=0, feedback=False, fix_pre1850_RCP=False,
PI=np.array([722, 170, 10, 4.29])):
"""Calculates tropospheric ozone forcing from precursor emissions based on
Stevenson et al, 2013 10.5194/acp-13-3063-2013
Inputs:
emissions: (nt x 40) numpy array
C_CH4 : (nt) numpy array of methane concentrations, ppb
Keywords:
T : change in surface temperature since pre-industrial
feedback : True or False - include temperature feedback on ozone
forcing?
fix_pre1850_RCP: Use different relationship for 1750/65 to 1850 based
on anthropogenic emissions from Skeie et al (2011)
for 1750 (atmos-chem-phys.net/11/11827/2011)
PI: : 4-element array of pre-industrial CH4 concentrations,
CO emissions, NMVOC emissions and NOx emissions
Outputs:
tropospheric ozone ERF time series.
"""
# expand to 2D/1D if not already
if emissions.ndim == 1:
nspec = len(emissions)
emissions = emissions.reshape((1, nspec))
if np.isscalar(C_CH4):
C_CH4 = np.ones(1)*C_CH4
# numbers in denominator are 2000-1750 concs or emissions used in
# Stevenson and traced back to Lamarque et al 2010 for 2000
# https://www.atmos-chem-phys.net/10/7017/2010/
year, em_CO, em_NMVOC, em_NOx = emissions[:,[0, 6, 7, 8]].T
nt = len(year)
F_CH4, F_CO, F_NMVOC, F_NOx = np.zeros((4,nt))
for i in range(nt):
if year[i]>=1850 or fix_pre1850_RCP==False:
F_CH4[i] = 0.166/960 * (C_CH4[i]-PI[0])
F_CO[i] = 0.058/681.8 * (em_CO[i]-PI[1])
F_NMVOC[i] = 0.035/155.84 * (em_NMVOC[i]-PI[2])
F_NOx[i] = 0.119/61.16 * (em_NOx[i] *
molwt.NO / molwt.N - PI[3])
# The RCP scenarios give a negative forcing prior to ~1780. This is
# because the anthropogenic emissions are given to be zero in RCPs but
# not zero in the Skeie numbers which are used here. This can be fixed
# to give a more linear behaviour.
else:
F_CH4[i] = 0.166/960 * (C_CH4[i]-722)
F_CO[i] = 0.058/681.8 * 215.59 * em_CO[i] / 385.59
F_NMVOC[i] = 0.035/155.84 * 51.97 * em_NMVOC[i] / 61.97
F_NOx[i] = 0.119/61.16 * 7.31 * (em_NOx[i]
* molwt.NO / molwt.N) / 11.6
# Include the effect of climate feedback? We fit a curve to the 2000, 2030
# and 2100 best estimates of feedback based on middle-of-the-road
# temperature projections.
def temperature_feedback(T, a=0.03189267, b=1.34966941, c=-0.03214807):
if T<=0:
return 0
else:
return a*np.exp(-b*T)+c
if feedback:
F = F_CH4 + F_CO + F_NMVOC + F_NOx + temperature_feedback(T)
else:
F = F_CH4 + F_CO + F_NMVOC + F_NOx
return F
| 36.384146
| 78
| 0.586224
| 911
| 5,967
| 3.731065
| 0.231614
| 0.015299
| 0.014122
| 0.014416
| 0.702265
| 0.702265
| 0.69138
| 0.644307
| 0.600177
| 0.558694
| 0
| 0.113499
| 0.306016
| 5,967
| 163
| 79
| 36.607362
| 0.707317
| 0.452321
| 0
| 0.56
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.04
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c606dd98dcd0e38522a604061eae8d10c8862e6
| 1,844
|
py
|
Python
|
manuscript/link_checker.py
|
wuyang1002431655/tango_with_django_19
|
42d5878e4a12037daf04d785826357cd4351a16d
|
[
"Apache-2.0"
] | 244
|
2016-04-12T15:39:47.000Z
|
2021-09-10T07:43:55.000Z
|
manuscript/link_checker.py
|
wuyang1002431655/tango_with_django_19
|
42d5878e4a12037daf04d785826357cd4351a16d
|
[
"Apache-2.0"
] | 57
|
2016-03-29T22:12:09.000Z
|
2019-08-26T07:50:11.000Z
|
manuscript/link_checker.py
|
wuyang1002431655/tango_with_django_19
|
42d5878e4a12037daf04d785826357cd4351a16d
|
[
"Apache-2.0"
] | 311
|
2016-04-27T04:41:02.000Z
|
2021-09-19T14:03:35.000Z
|
# Checks for broken links in the book chapters, printing the status of each link found to stdout.
# The Python package 'requests' must be installed and available for this simple module to work.
# Author: David Maxwell
# Date: 2017-02-14
import re
import requests
def main(chapters_list_filename, hide_success=True):
"""
hide_success = a boolean switch that determines whether to hide URLs that return an HTTP 200.
If set to True, only URLs that fail will be printed.
"""
chapters_f = open(chapters_list_filename, 'r')
pattern = re.compile(r'\[([^]]+)]\(\s*(http[s]?://[^)]+)\s*\)') # http://stackoverflow.com/a/23395483
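# e.g. matches the markdown link "[Django docs](https://docs.djangoproject.com/)",
# capturing the title as group 1 and the URL as group 2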
print 'filename\tline_no\ttitle\turl\tstatus_code'
for filename in chapters_f:
filename = filename.strip()
if not filename or filename.startswith('{'): # Skip non-filename lines
continue
chapter_f = open(filename, 'r')
line_no = 1
for line in chapter_f:
line = line.strip()
for match in re.findall(pattern, line):
title = match[0]
url = match[1]
if '127.0.0.1' in url or 'localhost' in url: # Don't check localhost URLs
continue
request = None
status_code = -1
try:
request = requests.get(url)
status_code = request.status_code
except requests.exceptions.ConnectionError:
request = None
status_code = 'FAILED_TO_CONNECT'
if hide_success and status_code == 200:
continue
title = title.replace('\t', ' ')
print '{filename}\t{line_no}\t{title}\t{url}\t{status_code}'.format(filename=filename,
line_no=line_no,
title=title,
url=url,
status_code=status_code)
line_no = line_no + 1
chapter_f.close()
chapters_f.close()
if __name__ == '__main__':
main('Book.txt', hide_success=False)
| 28.369231
| 103
| 0.645879
| 256
| 1,844
| 4.496094
| 0.441406
| 0.069505
| 0.034752
| 0.03649
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023571
| 0.240781
| 1,844
| 65
| 104
| 28.369231
| 0.798571
| 0.170824
| 0
| 0.125
| 0
| 0
| 0.13836
| 0.096633
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.05
| null | null | 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c67194eb5ab82333266efd8ffcbf64d199afeff
| 637
|
py
|
Python
|
Luke 02/02.py
|
Nilzone-/Knowit-Julekalender-2017
|
66ef8a651277e0fef7d9278f3f129410b5b98ee0
|
[
"MIT"
] | null | null | null |
Luke 02/02.py
|
Nilzone-/Knowit-Julekalender-2017
|
66ef8a651277e0fef7d9278f3f129410b5b98ee0
|
[
"MIT"
] | null | null | null |
Luke 02/02.py
|
Nilzone-/Knowit-Julekalender-2017
|
66ef8a651277e0fef7d9278f3f129410b5b98ee0
|
[
"MIT"
] | null | null | null |
import numpy as np
size = 1000
def create_wall(x, y):
return "{0:b}".format(x**3 + 12*x*y + 5*x*y**2).count("1") & 1
def build_grid():
return np.array([create_wall(j+1, i+1) for i in range(size) for j in range(size)]).reshape(size, size)
def visit(grid, x=0, y=0):
# iterative flood fill: the original recursive version overflows CPython's
# default recursion limit on a 1000x1000 grid
stack = [(x, y)]
while stack:
x, y = stack.pop()
if grid[x][y]:
continue
grid[x][y] = 1
if x > 0: stack.append((x-1, y))
if x < size-1: stack.append((x+1, y))
if y > 0: stack.append((x, y-1))
if y < size-1: stack.append((x, y+1))
grid = build_grid()
print "Original grid\n"
print grid
visit(grid)
print "\n\nAfter search\n"
print grid
print "\n%d unvisited points in grid" % (size**2 - np.count_nonzero(grid))
| 20.548387
| 104
| 0.620094
| 133
| 637
| 2.932331
| 0.308271
| 0.035897
| 0.128205
| 0.053846
| 0.187179
| 0.071795
| 0
| 0
| 0
| 0
| 0
| 0.050193
| 0.186813
| 637
| 31
| 105
| 20.548387
| 0.702703
| 0
| 0
| 0.095238
| 0
| 0
| 0.106583
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.047619
| null | null | 0.238095
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c67a7fccb58ad0744513e429cedf4044452005e
| 311
|
py
|
Python
|
databases/music.py
|
danielicapui/programa-o-avancada
|
d0e5b876b951ae04a46ffcda0dc0143e3f7114d9
|
[
"MIT"
] | null | null | null |
databases/music.py
|
danielicapui/programa-o-avancada
|
d0e5b876b951ae04a46ffcda0dc0143e3f7114d9
|
[
"MIT"
] | null | null | null |
databases/music.py
|
danielicapui/programa-o-avancada
|
d0e5b876b951ae04a46ffcda0dc0143e3f7114d9
|
[
"MIT"
] | null | null | null |
from utills import *
conn,cur=start('music')
criarTabela("tracks","title text,plays integer")
music=[('trunder',20),
('my way',15)]
insertInto("tracks","title,plays",music)
#cur.executemany("insert into tracks (title,plays) values (?,?)",music)
buscaTabela("tracks","title")
conn.commit()
conn.close()
| 25.916667
| 71
| 0.691318
| 40
| 311
| 5.375
| 0.65
| 0.204651
| 0.148837
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014235
| 0.096463
| 311
| 11
| 72
| 28.272727
| 0.75089
| 0.22508
| 0
| 0
| 0
| 0
| 0.316667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c684d5c56bbbdacbeb8612a9b08130a83635f9a
| 13,250
|
py
|
Python
|
video_analysis/code/scene_postprocess.py
|
pdxcycling/carv.io
|
cce0f91a76d3ceed714b3625d415131fd9540899
|
[
"MIT"
] | null | null | null |
video_analysis/code/scene_postprocess.py
|
pdxcycling/carv.io
|
cce0f91a76d3ceed714b3625d415131fd9540899
|
[
"MIT"
] | null | null | null |
video_analysis/code/scene_postprocess.py
|
pdxcycling/carv.io
|
cce0f91a76d3ceed714b3625d415131fd9540899
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import re
from collections import Counter
from flow_preprocess import FlowPreprocess
class ScenePostprocess(object):
"""
Heavy-lifting macro-feature class
"""
def __init__(self, flow_df, quality_df, remove_transitions=False):
"""
Default constructor
Args:
flow_df: Optical flow dataframe
quality_df: Image quality dataframe
remove_transitions: whether to remove frames around
scene transitions
Returns:
Nothing
"""
self.flow_df = flow_df.copy()
self.quality_df = quality_df.copy()
self.remove_transitions = remove_transitions
self.is_static = None
self.duration = self.get_duration()
self.num_frames = quality_df.shape[0]
## Do some rudimentary cleaning of/addding to the flow data
self.flow_df['distance'] = FlowPreprocess.flow_distances(self.flow_df)
self.flow_df['angle'] = FlowPreprocess.flow_angles(self.flow_df)
## Add scene-centric timestamps
## TODO: This has a few issues with actual start times...
scene_time_offset = self.quality_df['time'].min()
self.flow_df['time_scene'] = self.flow_df['time'] - scene_time_offset
self.quality_df['time_scene'] = self.quality_df['time'] - scene_time_offset
self.min_time_scene = self.quality_df['time_scene'].min()
self.max_time_scene = self.quality_df['time_scene'].max()
self.min_frame_num = self.quality_df['frame_number'].min()
self.max_frame_num = self.quality_df['frame_number'].max()
def _find_columns_by_name(self, df, name_re):
"""
Helper function to find binned features by the prefixes in their names
Args:
df: Dataframe
name_re: regular expression for finding colmns
Returns:
List of columns that have names that match name_re
"""
output = []
cols = df.columns
for c in cols:
if re.search(name_re, c):
output.append(c)
return output
def get_duration(self):
"""
Find scene duration (in seconds)
Args:
None
Returns:
Duration of scene in seconds
"""
min_time = np.min(self.quality_df['time'])
max_time = np.max(self.quality_df['time'])
return max_time - min_time
def get_avg_blur(self):
"""
Find average blur across entire scene
NOTE: The higher the number, the less the blur.
Args:
None
Returns:
Average blur as single float value
"""
avg_blur = np.mean(self.quality_df['blur'])
return avg_blur
def get_blur_percentage(self, blur_threshold=100):
"""
Proportion of of frames in scene that are blurry.
A frame is "blurry" if its average blur is below blur_threshold
Args:
blur_threshold: A float value that defines the threshold between
blurry and non-blurry
Returns:
Flow value of the proportion of the scene's frames that are blurry
"""
blur_pct = 1. * np.sum(self.quality_df['blur'] < blur_threshold)/self.quality_df.shape[0]
return blur_pct
def get_top_colors(self, num_colors=10):
"""
Find the dominant colors in all frames across the scene
NOTE: This can be sped if only a subset of frames are sampled.
Need to run experiments on the optimal sampling rate.
TODO: This approach should be changed in v2.0
Args:
num_colors: The number of most common colors to return.
This is 10 by default.
Returns:
Numpy array containing the most prevalent colors in the scene
"""
self.num_colors = num_colors
max_color_array = np.array(str)
cols = self._find_columns_by_name(self.quality_df, "hue")
for frame_num in range(self.min_frame_num, self.max_frame_num + 1):
frame_color_array = self.quality_df[cols].ix[frame_num].sort_values()[::-1].index.values[:self.num_colors]
max_color_array = np.append(max_color_array, frame_color_array)
## Find most common colors
color_count = Counter(max_color_array)
return map(lambda x: x[0], color_count.most_common(self.num_colors))
def _get_values_from_bin_names(self, cols):
"""
From a list of columns representing bins, return a list of the values
of those bins
Args:
cols: a list of column names of histogram bins
Returns:
A list of the value of each bin
"""
values = []
for c in cols:
matches = re.search('_(\d+.\d+)', c)
if matches:
values.append(float(matches.groups(0)[0]))
else:
## This should never happen, but just in case...
values.append(None)
return values
def get_avg_saturation(self):
"""
Find the average saturation across all frames in the scene
Args:
None
Returns:
A float value of average scene saturation
"""
cols = self._find_columns_by_name(self.quality_df, "sat")
vals = self._get_values_from_bin_names(cols)
sums = self.quality_df[cols].sum()
avg = np.sum((sums * vals).values)/np.sum(sums)
return avg
def get_avg_value(self):
"""
Find the average value (from HSV colorspace) across
all frames in the scene
Args:
None
Returns:
A float value of average scene HSV value
"""
cols = self._find_columns_by_name(self.quality_df, "val")
vals = self._get_values_from_bin_names(cols)
sums = self.quality_df[cols].sum()
avg = np.sum((sums * vals).values)/np.sum(sums)
return avg
def get_pixel_pct(self, col_name, frame_size=(480., 360.)):
"""
Calculates the number of pixels in a scene are in col_name
Args:
col_name: the name of column of interest
frame_size:
Returns:
Proportion of pixels that are in the column of interest
"""
frame_pixels = frame_size[0] * frame_size[1]
num_frames = self.quality_df.shape[0]
total_pixels = frame_pixels * num_frames
pixel_cnt = np.sum(self.quality_df[col_name])
return pixel_cnt / total_pixels
"""
vvv Flow calculations vvv
"""
def get_flow_percentile(self, percentile=0.5):
"""
Find the distance traveled by optical flow point,
filtered by the specified percentile.
Args:
percentile: Flow distance percentile to return.
Percentile is between 0 and 1.
Returns:
A float value of the flow distance
"""
return self.flow_df['distance'].quantile(percentile)
def get_avg_flow(self):
"""
Find the average distance an optical flow point has traveled between
frames.
Args:
None
Returns:
A float value of the average distance an optical flow point
has traveled between frames
"""
return self.flow_df['distance'].mean()
def get_shake(self):
"""
Return the shakiness of the scene. Shake is calculated by finding the
median distance an optical flow point has traveled in each frame, and
averaging these values.
TODO: vector addition.
Args:
None.
Returns:
A float value representing the shakiness of a scene.
"""
if not self.flow_df.empty:
shake = np.mean((self.flow_df.groupby('frame_number').median())['distance'])
else:
shake = 0
return shake
def get_flow_angle(self):
"""
Find the average angle of travel of the optical flow points in a scene.
Args:
None
Returns:
A float value of the average optical flow angle
"""
return self.flow_df['angle'].mean()
def get_flow_angle_std_dev(self):
"""
Find the standard devation of all optical flows in a scene
Args:
None
Returns:
A float value of the standard deviation of optical flow angle
"""
return self.flow_df['angle'].std()
def is_static_scene(self, remove_transitions=False):
"""
Determines whether or not scene is a static scene (vs. action scene)
TODO: Ignore some time around scene transitions because of fades.
Ensure that scene is long enough.
Args:
remove_transitions: remove frames at beginning and end of scene
Returns:
A boolean value of whether a scene is static or not.
"""
is_static = None
motion_threshold = 1 # one pixel of movement
total_flow_points = self.flow_df.shape[0] ## number of flow points in the scene
thresholded_df = self.flow_df[self.flow_df['distance'] > motion_threshold].copy()
if thresholded_df.empty:
is_static = True
else:
## Due to "artsy" transitions, ignore around beginning/end of scene
if remove_transitions:
## Amount of transition time between scenes
## This could be a percentage...
transition_time_buffer = 1 # in seconds
## Ensure that scene is long enough to remove buffer from analysis
if self.max_time_scene > transition_time_buffer:
thresholded_df = thresholded_df[thresholded_df['time_scene'] > transition_time_buffer]
thresholded_df = thresholded_df[thresholded_df['time_scene'] < self.max_time_scene - transition_time_buffer]
## Do not remove transitions if scene is too short
else:
pass
if not thresholded_df.empty:
##moving_flow_points = thresholded_df.shape[0]
moving_frames = thresholded_df.groupby(by=['frame_number']).mean().shape[0]
else:
##moving_flow_points = 0
moving_frames = 0
##pts_ratio = 1. * moving_flow_points/self.num_frames
pts_ratio = 1. * moving_frames/self.num_frames
# less than 1 moving frame per 4 frames
is_static = pts_ratio < .25
return is_static
def num_trackable_points_per_frame(self):
"""
Find the total number of optical flow points that are trackable per
frame.
"Trackability" is defined as being able to find a specific optical
flow point between frames.
Args:
None
Returns:
A dataframe with the number of trackable points, by frame.
"""
return self.flow_df.groupby('frame_number').size()
def avg_num_trackable_points_per_frame(self):
"""
Find the average number of optical flow points that are trackable,
over all frames in the frame.
"Trackability" is defined as being able to find a specific optical
flow point between frames.
Args:
None
Returns:
A float value of the average number of trackable optical flow
points in all of the scene's frames
"""
return 1. * len(self.flow_df) / self.num_frames
def to_df(self):
"""
Return a dataframe containing all features
TODO: better type checking
Args:
None
Returns:
Dataframe with all features
"""
scene_df = pd.DataFrame(index=[0])
top_colors = self.get_top_colors()
for n in range(self.num_colors):
scene_df['top_color_' + str(n)] = top_colors[n]
scene_df['avg_sat'] = self.get_avg_saturation()
scene_df['avg_val'] = self.get_avg_value()
scene_df['black_pixel_pct'] = self.get_pixel_pct('num_black_pixels')
scene_df['white_pixel_pct'] = self.get_pixel_pct('num_white_pixels')
scene_df['flow_percentile_25'] = self.get_flow_percentile(0.25)
scene_df['flow_percentile_50'] = self.get_flow_percentile(0.50)
scene_df['flow_percentile_75'] = self.get_flow_percentile(0.75)
scene_df['flow_avg'] = self.get_avg_flow()
scene_df['flow_angle'] = self.get_flow_angle()
scene_df['flow_angle_std_dev'] = self.get_flow_angle_std_dev()
scene_df['is_static_scene'] = self.is_static_scene()
##scene_df['action_peak_in_scene'] = None # where in scene does no
scene_df['shake_coeff'] = self.get_shake()
scene_df['avg_flow_pts_per_frame'] = self.avg_num_trackable_points_per_frame()
scene_df['blur'] = self.get_avg_blur()
scene_df['blur_pct'] = self.get_blur_percentage()
scene_df['duration'] = self.get_duration()
return scene_df
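# Hedged usage sketch (assumes flow/quality dataframes carrying the columns
# referenced above, e.g. 'time', 'frame_number', 'blur' and hue/sat/val bins):
#
# scene = ScenePostprocess(flow_df, quality_df)
# features = scene.to_df()  # one-row dataframe of scene-level features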
| 35.05291
| 128
| 0.60234
| 1,702
| 13,250
| 4.480024
| 0.174501
| 0.029508
| 0.035803
| 0.016787
| 0.309115
| 0.268459
| 0.229246
| 0.183475
| 0.153311
| 0.132852
| 0
| 0.006762
| 0.31917
| 13,250
| 377
| 129
| 35.145889
| 0.838488
| 0.367774
| 0
| 0.109489
| 0
| 0
| 0.066629
| 0.003146
| 0
| 0
| 0
| 0.013263
| 0
| 1
| 0.138686
| false
| 0.007299
| 0.036496
| 0
| 0.313869
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c6b5cb13f50ba4f535dc82987b58898ad693a5f
| 5,966
|
py
|
Python
|
data/external/repositories/42139/KDDCup13Track2-master/blocking.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | null | null | null |
data/external/repositories/42139/KDDCup13Track2-master/blocking.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | null | null | null |
data/external/repositories/42139/KDDCup13Track2-master/blocking.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | 1
|
2019-12-04T08:23:33.000Z
|
2019-12-04T08:23:33.000Z
|
#!/usr/bin/env python
from common import *
import csv
import argparse
from unidecode import unidecode
from nameparser import constants as npc
from collections import defaultdict
import cPickle as pickle
import re
stopwords_custom = set(['document', 'preparation', 'system', 'consortium', 'committee', 'international', 'artificial', 'network', 'distributed', 'based', 'research', 'language', 'technology', 'project', 'design', 'computer', 'control', 'object', 'internet', 'propulsion', 'corp', 'workshop', 'xml', 'world', 'work', 'thesis', 'test', 'tool', 'structure', 'statistical', 'laboratory', 'ltd', 'objects', 'process', 'scheduling', 'september', 'special', 'student', 'programs', 'capacitated', 'balancing', 'assembly', 'aspect', 'model', 'inc', 'psychological', 'psychology', 'mohammed', 'computing', 'software', 'programming', 'new', 'applications', 'jet', 'propulsion', 'classification', 'recommendation'])
stopwords = stopwords_custom | npc.TITLES | npc.PREFIXES | npc.SUFFIXES | npc.CONJUNCTIONS
def bin_exactsamename(authors):
bins = defaultdict(set)
for i, (id, a) in enumerate(authors.iteritems()):
bins[a['fullname']].add(id)
if (i+1) % 10000 == 0:
print_err(i+1)
return bins
def bin_samename(authors):
bins = defaultdict(set)
for i, (id, a) in enumerate(authors.iteritems()):
bins[a['fullname_joined']].add(id)
if (i+1) % 10000 == 0:
print_err(i+1)
return bins
def bin_fFfL(authors):
bins = defaultdict(set)
for i, (id, a) in enumerate(authors.iteritems()):
bins[a['fFfL']].add(id)
if (i+1) % 10000 == 0:
print_err(i+1)
return bins
def bin_fF3L(authors, max_bin_size=20):
bins = defaultdict(set)
for i, (id, a) in enumerate(authors.iteritems()):
if ':' not in a['fFiL'] and len(a['name_last']) >= 3 and len(a['fFiL']) > 2:
bins[a['fFiL'] + a['name_last'][1:3]].add(id)
if (i+1) % 10000 == 0:
print_err(i+1)
bk = bins.keys()
for b in bk:
if len(bins[b]) > max_bin_size:
del bins[b]
return bins
def bin_fFiL(authors, max_bin_size=20):
bins = defaultdict(set)
for i, (id, a) in enumerate(authors.iteritems()):
if len(a['fFiL']) > 2:
bins[a['fFiL']].add(id)
if (i+1) % 10000 == 0:
print_err(i+1)
bk = bins.keys()
for b in bk:
if len(bins[b]) > max_bin_size:
del bins[b]
return bins
def bin_iFfL(authors):
bins = defaultdict(set)
for i, (id, a) in enumerate(authors.iteritems()):
bins[a['iFfL']].add(id)
if (i+1) % 10000 == 0:
print_err(i+1)
return bins
def bin_fullparsedname(authors):
bins = defaultdict(set)
for i, (id, a) in enumerate(authors.iteritems()):
bins[a['fullname_parsed']].add(id)
if (i+1) % 10000 == 0:
print_err(i+1)
return bins
def bin_iFoffbyoneL(authors, max_bin_size=30):
bins = defaultdict(set)
for i, (id, a) in enumerate(authors.iteritems()):
if ':' not in a['fullname'] and a['name_first'] and a['name_last']:
bins[a['name_first'][0] + a['name_last']].add(id)
if len(a['name_last']) > 1:
bins[a['name_first'][0] + a['name_last'][:-1]].add(id)
if (i+1) % 10000 == 0:
print_err(i+1)
bk = bins.keys()
for b in bk:
if len(bins[b]) > max_bin_size:
del bins[b]
return bins
def bin_2FoffbyoneL(authors, max_bin_size=30):
bins = defaultdict(set)
for i, (id, a) in enumerate(authors.iteritems()):
if ':' not in a['fullname'] and len(a['name_first']) >= 2 and a['name_last']:
bins[a['name_first'][0:2] + a['name_last']].add(id)
if len(a['name_last']) > 1:
bins[a['name_first'][0:2] + a['name_last'][:-1]].add(id)
if (i+1) % 10000 == 0:
print_err(i+1)
bk = bins.keys()
for b in bk:
if len(bins[b]) > max_bin_size:
del bins[b]
return bins
def bin_metaphone(authors):
bins = defaultdict(set)
for i, (id, a) in enumerate(authors.iteritems()):
if a['metaphone_fullname']:
bins[a['metaphone_fullname']].add(id)
if (i+1) % 10000 == 0:
print_err(i+1)
# bk = bins.keys()
# for b in bk:
# if len(bins[b]) > max_bin_size:
# del bins[b]
return bins
def bin_offbylastone(authors):
bins = defaultdict(set)
for i, (id, a) in enumerate(authors.iteritems()):
if ':' not in a['fullname_joined']:
bins[a['fullname_joined']].add(id)
if len(a['fullname_joined']) > 1:
bins[a['fullname_joined'][:-1]].add(id)
if (i+1) % 10000 == 0:
print_err(i+1)
return bins
def bin_token(authors, nw=2, max_bin_size=100):
bins = defaultdict(set)
for i, (id, a) in enumerate(authors.iteritems()):
if ':' not in a['name']:
tokens = re.sub(r"[^\w]", " ", a['name']).split()
tokens = [v for v in tokens if len(v) > 2 and v not in stopwords]
ngrams = zip(*[tokens[j:] for j in range(nw)])
for p in ngrams:
pg = ' '.join(p)
if len(pg) > len(p)*2-1:
bins[pg].add(id)
if (i+1) % 10000 == 0:
print_err(i+1)
bk = bins.keys()
for b in bk:
if len(bins[b]) > max_bin_size:
del bins[b]
return bins
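# Illustrative example for bin_token (hypothetical record): with
# a['name'] = 'machine learning group' and nw=2, the surviving token
# bigrams are 'machine learning' and 'learning group', so the author id
# is added to both of those bins.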
def bin_ngrams(authors, n=15, max_bin_size=30):
bins = defaultdict(set)
for i, (id, a) in enumerate(authors.iteritems()):
if ':' not in a['fullname']:
lname = a['fullname']
ngrams = zip(*[lname[j:] for j in range(n)])
for p in ngrams:
if not any(((s in p) for s in stopwords_custom)):
bins[''.join(p)].add(id)
if (i+1) % 10000 == 0:
print_err(i+1)
bk = bins.keys()
for b in bk:
if len(bins[b]) > max_bin_size:
del bins[b]
return bins
def main():
parser = argparse.ArgumentParser()
parser.add_argument('authorprefeat', nargs='?', default='generated/Author_prefeat.pickle')
parser.add_argument('type', nargs='?', default='iFfL')
args = parser.parse_args()
print_err("Loading pickled author pre-features")
authors = pickle.load(open(args.authorprefeat, 'rb'))
bins = globals()["bin_"+args.type](authors)
bins = sorted([(len(bv), blabel, bv) for blabel, bv in bins.iteritems()], reverse=True)
for _, binlabel, binv in bins:
print binlabel + ';' + ','.join(map(str, sorted(binv)))
if __name__ == "__main__":
main()
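# Usage sketch (script name and ids are hypothetical):
#   $ python bin_authors.py generated/Author_prefeat.pickle iFfL > bins_iFfL.txt
# Each output line is one bin: the bin label, a ';', then the sorted member
# author ids, e.g. "j:smith;17,204,3311". Bins are printed largest first.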
| 29.979899
| 703
| 0.632417
| 936
| 5,966
| 3.930556
| 0.192308
| 0.014134
| 0.030443
| 0.074205
| 0.563468
| 0.555858
| 0.555858
| 0.537374
| 0.537374
| 0.522153
| 0
| 0.029124
| 0.177003
| 5,966
| 199
| 704
| 29.9799
| 0.720163
| 0.016929
| 0
| 0.58642
| 0
| 0
| 0.15714
| 0.005289
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.049383
| null | null | 0.092593
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c6cc14ec8ce3c7dc9875cccdf742d57d079973d
| 10,181
|
py
|
Python
|
diofant/tests/integrals/test_heurisch.py
|
Electric-tric/diofant
|
92c4bf0ef301e5d6f0cfab545b036e1cb7de3c0a
|
[
"BSD-3-Clause"
] | 1
|
2021-08-22T09:34:15.000Z
|
2021-08-22T09:34:15.000Z
|
diofant/tests/integrals/test_heurisch.py
|
Electric-tric/diofant
|
92c4bf0ef301e5d6f0cfab545b036e1cb7de3c0a
|
[
"BSD-3-Clause"
] | null | null | null |
diofant/tests/integrals/test_heurisch.py
|
Electric-tric/diofant
|
92c4bf0ef301e5d6f0cfab545b036e1cb7de3c0a
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
from diofant import (Add, Derivative, Ei, Eq, Function, I, Integral, LambertW,
Piecewise, Rational, Sum, Symbol, acos, asin, asinh,
besselj, cos, cosh, diff, erf, exp, li, log, pi, ratsimp,
root, simplify, sin, sinh, sqrt, symbols, tan)
from diofant.integrals.heurisch import components, heurisch, heurisch_wrapper
__all__ = ()
x, y, z, nu = symbols('x,y,z,nu')
f = Function('f')
def test_components():
assert components(x*y, x) == {x}
assert components(1/(x + y), x) == {x}
assert components(sin(x), x) == {sin(x), x}
assert components(sin(x)*sqrt(log(x)), x) == \
{log(x), sin(x), sqrt(log(x)), x}
assert components(x*sin(exp(x)*y), x) == \
{sin(y*exp(x)), x, exp(x)}
assert components(x**Rational(17, 54)/sqrt(sin(x)), x) == \
{sin(x), root(x, 54), sqrt(sin(x)), x}
assert components(f(x), x) == \
{x, f(x)}
assert components(Derivative(f(x), x), x) == \
{x, f(x), Derivative(f(x), x)}
assert components(f(x)*diff(f(x), x), x) == \
{x, f(x), Derivative(f(x), x)}
def test_heurisch_polynomials():
assert heurisch(1, x) == x
assert heurisch(x, x) == x**2/2
assert heurisch(x**17, x) == x**18/18
def test_heurisch_fractions():
assert heurisch(1/x, x) == log(x)
assert heurisch(1/(2 + x), x) == log(x + 2)
assert heurisch(1/(x + sin(y)), x) == log(x + sin(y))
# Up to a constant C = 5*pi*I/12, Mathematica gives an identical result
# in the first case. The difference arises because diofant changes the
# signs of expressions indiscriminately.
# XXX ^ ^ ^ is this still correct?
assert heurisch(5*x**5/(
2*x**6 - 5), x) in [5*log(2*x**6 - 5) / 12, 5*log(-2*x**6 + 5) / 12]
assert heurisch(5*x**5/(2*x**6 + 5), x) == 5*log(2*x**6 + 5) / 12
assert heurisch(1/x**2, x) == -1/x
assert heurisch(-1/x**5, x) == 1/(4*x**4)
def test_heurisch_log():
assert heurisch(log(x), x) == x*log(x) - x
assert heurisch(log(3*x), x) == -x + x*log(3) + x*log(x)
assert heurisch(log(x**2), x) in [x*log(x**2) - 2*x, 2*x*log(x) - 2*x]
def test_heurisch_exp():
assert heurisch(exp(x), x) == exp(x)
assert heurisch(exp(-x), x) == -exp(-x)
assert heurisch(exp(17*x), x) == exp(17*x) / 17
assert heurisch(x*exp(x), x) == x*exp(x) - exp(x)
assert heurisch(x*exp(x**2), x) == exp(x**2) / 2
assert heurisch(exp(-x**2), x) is None
assert heurisch(2**x, x) == 2**x/log(2)
assert heurisch(x*2**x, x) == x*2**x/log(2) - 2**x*log(2)**(-2)
assert heurisch(Integral(x**z*y, (y, 1, 2), (z, 2, 3)).function, x) == (x*x**z*y)/(z+1)
assert heurisch(Sum(x**z, (z, 1, 2)).function, z) == x**z/log(x)
def test_heurisch_trigonometric():
assert heurisch(sin(x), x) == -cos(x)
assert heurisch(pi*sin(x) + 1, x) == x - pi*cos(x)
assert heurisch(cos(x), x) == sin(x)
assert heurisch(tan(x), x) in [
log(1 + tan(x)**2)/2,
log(tan(x) + I) + I*x,
log(tan(x) - I) - I*x,
]
assert heurisch(sin(x)*sin(y), x) == -cos(x)*sin(y)
assert heurisch(sin(x)*sin(y), y) == -cos(y)*sin(x)
# gives sin(x) in answer when run via setup.py and cos(x) when run via py.test
assert heurisch(sin(x)*cos(x), x) in [sin(x)**2 / 2, -cos(x)**2 / 2]
assert heurisch(cos(x)/sin(x), x) == log(sin(x))
assert heurisch(x*sin(7*x), x) == sin(7*x) / 49 - x*cos(7*x) / 7
assert heurisch(1/pi/4 * x**2*cos(x), x) == 1/pi/4*(x**2*sin(x) -
2*sin(x) + 2*x*cos(x))
assert heurisch(acos(x/4) * asin(x/4), x) == 2*x - (sqrt(16 - x**2))*asin(x/4) \
+ (sqrt(16 - x**2))*acos(x/4) + x*asin(x/4)*acos(x/4)
def test_heurisch_hyperbolic():
assert heurisch(sinh(x), x) == cosh(x)
assert heurisch(cosh(x), x) == sinh(x)
assert heurisch(x*sinh(x), x) == x*cosh(x) - sinh(x)
assert heurisch(x*cosh(x), x) == x*sinh(x) - cosh(x)
assert heurisch(
x*asinh(x/2), x) == x**2*asinh(x/2)/2 + asinh(x/2) - x*sqrt(4 + x**2)/4
def test_heurisch_mixed():
assert heurisch(sin(x)*exp(x), x) == exp(x)*sin(x)/2 - exp(x)*cos(x)/2
def test_heurisch_radicals():
assert heurisch(1/sqrt(x), x) == 2*sqrt(x)
assert heurisch(1/sqrt(x)**3, x) == -2/sqrt(x)
assert heurisch(sqrt(x)**3, x) == 2*sqrt(x)**5/5
assert heurisch(sin(x)*sqrt(cos(x)), x) == -2*sqrt(cos(x))**3/3
y = Symbol('y')
assert heurisch(sin(y*sqrt(x)), x) == 2/y**2*sin(y*sqrt(x)) - \
2*sqrt(x)*cos(y*sqrt(x))/y
assert heurisch_wrapper(sin(y*sqrt(x)), x) == Piecewise(
(0, Eq(y, 0)),
(-2*sqrt(x)*cos(sqrt(x)*y)/y + 2*sin(sqrt(x)*y)/y**2, True))
y = Symbol('y', positive=True)
assert heurisch_wrapper(sin(y*sqrt(x)), x) == 2/y**2*sin(y*sqrt(x)) - \
2*sqrt(x)*cos(y*sqrt(x))/y
def test_heurisch_special():
assert heurisch(erf(x), x) == x*erf(x) + exp(-x**2)/sqrt(pi)
assert heurisch(exp(-x**2)*erf(x), x) == sqrt(pi)*erf(x)**2 / 4
def test_heurisch_symbolic_coeffs():
assert heurisch(1/(x + y), x) == log(x + y)
assert heurisch(1/(x + sqrt(2)), x) == log(x + sqrt(2))
assert simplify(diff(heurisch(log(x + y + z), y), y)) == log(x + y + z)
def test_heurisch_symbolic_coeffs_1130():
y = Symbol('y')
assert heurisch_wrapper(1/(x**2 + y), x) == Piecewise(
(-1/x, Eq(y, 0)),
(-I*log(x - I*sqrt(y))/(2*sqrt(y)) + I*log(x + I*sqrt(y))/(2*sqrt(y)), True))
y = Symbol('y', positive=True)
assert heurisch_wrapper(1/(x**2 + y), x) in [I/sqrt(y)*log(x + sqrt(-y))/2 -
I/sqrt(y)*log(x - sqrt(-y))/2, I*log(x + I*sqrt(y)) /
(2*sqrt(y)) - I*log(x - I*sqrt(y))/(2*sqrt(y))]
def test_heurisch_hacking():
assert (heurisch(sqrt(1 + 7*x**2), x, hints=[]) ==
x*sqrt(1 + 7*x**2)/2 + sqrt(7)*asinh(sqrt(7)*x)/14)
assert (heurisch(sqrt(1 - 7*x**2), x, hints=[]) ==
x*sqrt(1 - 7*x**2)/2 + sqrt(7)*asin(sqrt(7)*x)/14)
assert (heurisch(1/sqrt(1 + 7*x**2), x, hints=[]) ==
sqrt(7)*asinh(sqrt(7)*x)/7)
assert (heurisch(1/sqrt(1 - 7*x**2), x, hints=[]) ==
sqrt(7)*asin(sqrt(7)*x)/7)
assert (heurisch(exp(-7*x**2), x, hints=[]) == sqrt(7*pi)*erf(sqrt(7)*x)/14)
assert heurisch(1/sqrt(9 - 4*x**2), x, hints=[]) == asin(2*x/3)/2
assert heurisch(1/sqrt(9 + 4*x**2), x, hints=[]) == asinh(2*x/3)/2
assert heurisch(li(x), x, hints=[]) == x*li(x) - Ei(2*log(x))
def test_heurisch_function():
assert heurisch(f(x), x) is None
def test_heurisch_wrapper():
f = 1/(y + x)
assert heurisch_wrapper(f, x) == log(x + y)
f = 1/(y - x)
assert heurisch_wrapper(f, x) == -log(x - y)
f = 1/((y - x)*(y + x))
assert heurisch_wrapper(f, x) == \
Piecewise((1/x, Eq(y, 0)), (log(x + y)/2/y - log(x - y)/2/y, True))
# issue sympy/sympy#6926
f = sqrt(x**2/((y - x)*(y + x)))
assert heurisch_wrapper(f, x) == x*sqrt(x**2)*sqrt(1/(-x**2 + y**2)) \
- y**2*sqrt(x**2)*sqrt(1/(-x**2 + y**2))/x
def test_sympyissue_3609():
assert heurisch(1/(x * (1 + log(x)**2)), x) == I*log(log(x) + I)/2 - \
I*log(log(x) - I)/2
# These are examples from the Poor Man's Integrator
# http://www-sop.inria.fr/cafe/Manuel.Bronstein/pmint/examples/
def test_pmint_rat():
# TODO: heurisch() is off by a constant: -3/4. Possibly different permutation
# would give the optimal result?
def drop_const(expr, x):
if expr.is_Add:
return Add(*[ arg for arg in expr.args if arg.has(x) ])
else:
return expr
f = (x**7 - 24*x**4 - 4*x**2 + 8*x - 8)/(x**8 + 6*x**6 + 12*x**4 + 8*x**2)
g = (4 + 8*x**2 + 6*x + 3*x**3)/(x**5 + 4*x**3 + 4*x) + log(x)
assert drop_const(ratsimp(heurisch(f, x)), x) == g
def test_pmint_trig():
f = (x - tan(x)) / tan(x)**2 + tan(x)
g = -x**2/2 - x/tan(x) + log(tan(x)**2 + 1)/2
assert heurisch(f, x) == g
@pytest.mark.slow # 8 seconds on 3.4 GHz
def test_pmint_logexp():
f = (1 + x + x*exp(x))*(x + log(x) + exp(x) - 1)/(x + log(x) + exp(x))**2/x
g = log(x**2 + 2*x*exp(x) + 2*x*log(x) + exp(2*x) + 2*exp(x)*log(x) + log(x)**2)/2 + 1/(x + exp(x) + log(x))
# TODO: Optimal solution is g = 1/(x + log(x) + exp(x)) + log(x + log(x) + exp(x)),
# but Diofant requires a lot of guidance to properly simplify heurisch() output.
assert ratsimp(heurisch(f, x)) == g
@pytest.mark.slow # 8 seconds on 3.4 GHz
def test_pmint_erf():
f = exp(-x**2)*erf(x)/(erf(x)**3 - erf(x)**2 - erf(x) + 1)
g = sqrt(pi)*log(erf(x) - 1)/8 - sqrt(pi)*log(erf(x) + 1)/8 - sqrt(pi)/(4*erf(x) - 4)
assert ratsimp(heurisch(f, x)) == g
def test_pmint_LambertW():
f = LambertW(x)
g = x*LambertW(x) - x + x/LambertW(x)
assert heurisch(f, x) == g
@pytest.mark.xfail
def test_pmint_besselj():
# TODO: in both cases heurisch() gives None. Wrong besselj() derivative?
f = besselj(nu + 1, x)/besselj(nu, x)
g = nu*log(x) - log(besselj(nu, x))
assert simplify(heurisch(f, x) - g) == 0
f = (nu*besselj(nu, x) - x*besselj(nu + 1, x))/x
g = besselj(nu, x)
assert simplify(heurisch(f, x) - g) == 0
@pytest.mark.slow
def test_pmint_WrightOmega():
def omega(x):
return LambertW(exp(x))
f = (1 + omega(x) * (2 + cos(omega(x)) * (x + omega(x))))/(1 + omega(x))/(x + omega(x))
g = log(x + LambertW(exp(x))) + sin(LambertW(exp(x)))
assert heurisch(f, x) == g
def test_RR():
# Make sure the algorithm does the right thing if the ring is RR. See
# issue sympy/sympy#8685.
assert heurisch(sqrt(1 + 0.25*x**2), x, hints=[]) == \
0.5*x*sqrt(0.25*x**2 + 1) + 1.0*asinh(0.5*x)
# TODO: convert the rest of PMINT tests:
# Airy functions
# f = (x - AiryAi(x)*AiryAi(1, x)) / (x**2 - AiryAi(x)**2)
# g = Rational(1,2)*ln(x + AiryAi(x)) + Rational(1,2)*ln(x - AiryAi(x))
# f = x**2 * AiryAi(x)
# g = -AiryAi(x) + AiryAi(1, x)*x
# Whittaker functions
# f = WhittakerW(mu + 1, nu, x) / (WhittakerW(mu, nu, x) * x)
# g = x/2 - mu*ln(x) - ln(WhittakerW(mu, nu, x))
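# A hedged sketch of the first Airy TODO above, marked xfail because
# heurisch() is not known to handle Airy functions yet. It assumes diofant
# exposes airyai/airyaiprime with sympy-compatible semantics (AiryAi(1, x)
# in the Maple source is the first derivative).
@pytest.mark.xfail
def test_pmint_airy():
    from diofant import airyai, airyaiprime
    f = (x - airyai(x)*airyaiprime(x))/(x**2 - airyai(x)**2)
    g = log(x + airyai(x))/2 + log(x - airyai(x))/2
    assert simplify(heurisch(f, x) - g) == 0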
| 34.511864
| 112
| 0.534722
| 1,869
| 10,181
| 2.87801
| 0.107009
| 0.030117
| 0.072504
| 0.023796
| 0.466072
| 0.342443
| 0.232757
| 0.204313
| 0.189812
| 0.138873
| 0
| 0.047576
| 0.232001
| 10,181
| 294
| 113
| 34.629252
| 0.640363
| 0.122778
| 0
| 0.086957
| 0
| 0
| 0.00146
| 0
| 0
| 0
| 0
| 0.003401
| 0.467391
| 1
| 0.141304
| false
| 0
| 0.016304
| 0.005435
| 0.173913
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c70c6e774d6a8ca53417d3cc9999e257be28aad
| 1,093
|
py
|
Python
|
test/test_pipeline/components/classification/test_passive_aggressive.py
|
vardaan-raj/auto-sklearn
|
4597152e3a60cd6f6e32719a3bef26e13951b102
|
[
"BSD-3-Clause"
] | 1
|
2021-02-21T16:44:44.000Z
|
2021-02-21T16:44:44.000Z
|
test/test_pipeline/components/classification/test_passive_aggressive.py
|
vardaan-raj/auto-sklearn
|
4597152e3a60cd6f6e32719a3bef26e13951b102
|
[
"BSD-3-Clause"
] | 9
|
2021-02-12T17:52:34.000Z
|
2021-06-26T11:37:41.000Z
|
test/test_pipeline/components/classification/test_passive_aggressive.py
|
vardaan-raj/auto-sklearn
|
4597152e3a60cd6f6e32719a3bef26e13951b102
|
[
"BSD-3-Clause"
] | 1
|
2021-07-06T23:02:42.000Z
|
2021-07-06T23:02:42.000Z
|
import sklearn.linear_model
from autosklearn.pipeline.components.classification.passive_aggressive import \
PassiveAggressive
from .test_base import BaseClassificationComponentTest
class PassiveAggressiveComponentTest(BaseClassificationComponentTest):
__test__ = True
res = dict()
res["default_iris"] = 0.92
res["iris_n_calls"] = 5
res["default_iris_iterative"] = 0.92
res["iris_iterative_n_iter"] = 32
res["default_iris_proba"] = 0.29271032477461295
res["default_iris_sparse"] = 0.4
res["default_digits"] = 0.9156041287188829
res["digits_n_calls"] = 6
res["default_digits_iterative"] = 0.9156041287188829
res["digits_iterative_n_iter"] = 64
res["default_digits_binary"] = 0.9927140255009107
res["default_digits_multilabel"] = 0.90997912489192
res["default_digits_multilabel_proba"] = 1.0
res['ignore_hps'] = ['max_iter']
sk_mod = sklearn.linear_model.PassiveAggressiveClassifier
module = PassiveAggressive
step_hyperparameter = {
'name': 'max_iter',
'value': module.get_max_iter(),
}
| 30.361111
| 79
| 0.725526
| 123
| 1,093
| 6.105691
| 0.447154
| 0.11984
| 0.106525
| 0.026631
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10989
| 0.167429
| 1,093
| 35
| 80
| 31.228571
| 0.715385
| 0
| 0
| 0
| 0
| 0
| 0.26624
| 0.15279
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.185185
| 0.111111
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
7c7633cae0980db6c9c40b9c34972bdb7f5c0282
| 7,139
|
py
|
Python
|
Detect.py
|
SymenYang/Vanish-Point-Detect
|
0e83e2b2a86e9523ed4a86f592f3a8dee594d691
|
[
"MIT"
] | 2
|
2017-10-17T10:08:25.000Z
|
2017-10-17T11:17:39.000Z
|
Detect.py
|
SymenYang/Vanish-Point-Detect
|
0e83e2b2a86e9523ed4a86f592f3a8dee594d691
|
[
"MIT"
] | null | null | null |
Detect.py
|
SymenYang/Vanish-Point-Detect
|
0e83e2b2a86e9523ed4a86f592f3a8dee594d691
|
[
"MIT"
] | null | null | null |
import cv2 as cv
import numpy as np
import copy
import math
import Edges
import INTPoint
eps = 1e-7
votes = {}
Groups = []
VPoints = []
Centers = []
Cluster = []
voters = {}
def getEdges(image):
#moved to Edges.py
return Edges.getEdges(image)
def getLines(edges):
#moved to Edges.py
return Edges.getLines(edges)
def checkRound(pos,edges):
#moved to Edges.py
return Edges.checkRound(pos,edges)
def outOfSize(pos,edges):
#moved to Edges.py
return Edges.outOfSize(pos,edges)
def extenLine(line,edges):
#moved to Edges.py
return Edges.extenLine(line,edges)
def extenLines(lines,edges):
#moved to Edges.py
return Edges.extenLines(lines,edges)
def shouldMerge(line1,line2):
#moved to Edges.py
return Edges.shouldMerge(line1,line2)
def mergeLines(lines):
#moved to Edges.py
return Edges.mergeLines(lines)
def getLineABC(line):
#moved to Edges.py
return Edges.getLineABC(line)
def getCirAnch(a,b):
#moved to Edges.py
return Edges.getCirAnch(a,b)
def getCrossPoint(linea,lineb):
#moved to INTPoint.py
return INTPoint.getIntersectPoint(linea,lineb)
def sortLines(lines):
#moved to Edges.py
return Edges.sortLines(lines)
def getVPoints2(lines,arange = 0.2617):
#moved to INTPoint.py
global VPoints
VPoints = INTPoint.getVPoints2(lines,arange)
return VPoints
def getVPoints(num = 16):
#this function has fallen into disuse because it is too slow
for i in range(0,num + 1,1):
lens = len(Groups[i])
for j in range(0,lens,1):
for k in range(j+1,lens,1):
VPoints.append(getCrossPoint(Groups[i][j],Groups[i][k]))
def removeSame(list):
#moved to INTPoint.py
return INTPoint.removeSame(list)
def getLinesLength(line):
#moved to INTPoint.py
return INTPoint.getLinesLength(line)
def getMidPoint(line):
#moved to INTPoint.py
return INTPoint.getMidPoint(line)
def getArch(line,point):
#moved to INTPoint.py
return INTPoint.getArch(line,point)
def voteForPoint(lines):
#moved to INTPoint.py
global votes
global voters
votes,voters = INTPoint.voteForPoint(lines,VPoints)
return
def getGraPoint(points):
count = 1.0
sumx = 0.0
sumy = 0.0
for point in points:
w = votes[point]
count += w
sumx += w * point[0]
sumy += w * point[1]
return (sumx/count,sumy/count)
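# Illustrative check of the weighted centroid above (hypothetical votes):
# with votes = {(0.0, 0.0): 1.0, (4.0, 2.0): 3.0}, getGraPoint(votes) gives
# ((0*1 + 4*3)/(1 + 1 + 3), (0*1 + 2*3)/(1 + 1 + 3)) = (2.4, 1.2);
# count starts at 1.0, which slightly damps the result toward the origin.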
def devideIntoPoints(Points):
global Cluster
lens = len(Cluster)
for i in range(0,lens,1):
Cluster[i] = []
for point in Points:
if point[0] == 'p' or point[0] == 'h' or point[0] == 'v':
continue
if votes[point] == 0:
continue
minlens = 1e15
minpos = 0
now = -1
for cen in Centers:
now += 1
lens = getLinesLength((point[0],point[1],cen[0],cen[1]))
if lens < minlens:
minlens = lens
minpos = now
Cluster[minpos].append(point)
def KMean(points,K = 3,step = 50):
global Cluster
global Centers
Cluster = []
Centers = []
if K == 1:
step = 1
for i in range(0,K,1):
Cluster.append([])
Centers.append([0,0])
count = 0
for point in points:
if point[0] != 'p' and point[0] != 'v' and point[0] != 'h' and votes[point] != 0:
Centers[count][0] = point[0]
Centers[count][1] = point[1]
count += 1
if count == K:
break
for i in range(0,step,1):
devideIntoPoints(points)
for i in range(0,K,1):
Centers[i] = getGraPoint(Cluster[i])
def getFinal(points):
count = 0.0
num = 0
p1 = 0.0
ret1 = []
p2 = 0.0
ret2 = []
for item in votes:
if item[0] == 'p' or item[0] == 'h' or item[0] == 'v':
if votes[item] > p1:
p2 = p1
ret2 = ret1
p1 = votes[item]
ret1 = item
else:
if votes[item] > p2:
p2 = votes[item]
ret2 = item
else:
count += votes[item]
num += 1
K = 3
ret = []
count = count / num * 0.1
if p1 > count:
K -= 1
ret.append(ret1)
if p2 > count:
K -= 1
ret.append(ret2)
KMean(points,K)
for i in range(0,K,1):
ret.append(Centers[i])
return ret
def deal(inputname,outputname):
global votes
global Groups
global VPoints
global Centers
global Cluster
global voters
votes = {}
Groups = []
VPoints = []
Centers = []
Cluster = []
voters = {}
image = cv.imread(inputname)
edges = getEdges(image)
cv.imwrite(outputname + 'edges.jpg',edges)
lines = getLines(edges)
lines2 = copy.deepcopy(lines)
lines2 = extenLines(lines2,edges)
lines2 = mergeLines(lines2)
#devideIntoGroups(lines2,3)
lines2 = sortLines(lines2)
getVPoints2(lines2)
VPoints = removeSame(VPoints)
voteForPoint(lines2)
votes2 = sorted(votes.iteritems(), key=lambda kv: kv[1], reverse=True)
lenofvotes = min(len(votes2),max(5,int(len(votes2) * 0.2)))
votesFinal = {}
VPoints = []
for i in range(0,lenofvotes,1):
votesFinal[votes2[i][0]] = votes2[i][1]
VPoints.append(votes2[i][0])
for i in range(lenofvotes,len(votes2),1):
if votes2[i][0][0] == 'h' or votes2[i][0][0] == 'v' or votes2[i][0][0] == 'p':
votesFinal[votes2[i][0]] = votes2[i][1]
VPoints.append(votes2[i][0])
votes = votesFinal
ans = getFinal(VPoints)
print ans
edges = cv.cvtColor(edges,cv.COLOR_GRAY2BGR)
edges2 = copy.deepcopy(edges)
for item in lines:
if item[0] == 'N':
continue
cv.line(edges,(item[0],item[1]),(item[2],item[3]),(0,0,255),2)
for item in lines2:
cv.line(edges2,(item[0],item[1]),(item[2],item[3]),(0,0,255),2)
color = [255,0,0,0]
for clu in Cluster:
for i in range(0,4,1):
if color[i] == 255:
color[i+1] = 255
color[i] = 0
break
for point in clu:
if point[0] > 0 and point[1] > 0:
if point[0] < edges.shape[1] and point[1] < edges.shape[0]:
if votes[point] == 0:
continue
cv.line(edges2,(int(point[0]),int(point[1])),(int(point[0]),int(point[1])),(color[1],color[2],color[3]),10)
for point in ans:
if point[0] > 0 and point[1] > 0:
if point[0] < edges.shape[1] and point[1] < edges.shape[0]:
cv.line(edges2,(int(point[0]),int(point[1])),(int(point[0]),int(point[1])),(255,255,255),10)
cv.imwrite(outputname + 'linedetect.jpg',edges)
cv.imwrite(outputname + 'answer.jpg',edges2)
fd = open(outputname + 'answer.txt','w')
fd.write('(' + str(ans[0][0]) + ',' + str(ans[0][1]) + ')(' + str(ans[1][0]) + ',' + str(ans[1][1]) + ')(' + str(ans[2][0]) + ',' + str(ans[2][1]) + ')')
fd.close()
deal("data/1.jpg",'1')
| 26.838346
| 157
| 0.559462
| 979
| 7,139
| 4.078652
| 0.162411
| 0.030053
| 0.033058
| 0.038567
| 0.314801
| 0.261207
| 0.184323
| 0.118708
| 0.089657
| 0.089657
| 0
| 0.049771
| 0.2964
| 7,139
| 266
| 158
| 26.838346
| 0.745172
| 0.057851
| 0
| 0.237209
| 0
| 0
| 0.011481
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.027907
| null | null | 0.004651
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c76d6a2f8e354238a96f859815250852db8cda1
| 738
|
py
|
Python
|
kafka-rockset-integration/generate_customers_data.py
|
farkaskid/recipes
|
8eef799cda899ea266f2849d485917f9b0d83190
|
[
"Apache-2.0"
] | 21
|
2019-02-27T22:30:28.000Z
|
2021-07-18T17:26:56.000Z
|
kafka-rockset-integration/generate_customers_data.py
|
farkaskid/recipes
|
8eef799cda899ea266f2849d485917f9b0d83190
|
[
"Apache-2.0"
] | 16
|
2019-07-03T22:04:21.000Z
|
2022-02-26T18:34:05.000Z
|
kafka-rockset-integration/generate_customers_data.py
|
farkaskid/recipes
|
8eef799cda899ea266f2849d485917f9b0d83190
|
[
"Apache-2.0"
] | 11
|
2019-03-13T08:55:31.000Z
|
2022-02-07T08:35:16.000Z
|
"""Generate Customer Data"""
import csv
import random
from config import MIN_CUSTOMER_ID, MAX_CUSTOMER_ID
ACQUISITION_SOURCES = [
'OrganicSearch',
'PaidSearch',
'Email',
'SocialMedia',
'Display',
'Affiliate',
'Referral'
]
def main():
with open('customers.csv', 'w') as fout:
writer = csv.DictWriter(fout, fieldnames=['CustomerID', 'AcquisitionSource'])
writer.writeheader()
for customer_id in range(MIN_CUSTOMER_ID, MAX_CUSTOMER_ID + 1):
record = {
'CustomerID': int(customer_id),
'AcquisitionSource': random.choices(ACQUISITION_SOURCES).pop()
}
writer.writerow(record)
if __name__ == '__main__':
main()
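# Example of the resulting customers.csv (values illustrative; ids run from
# MIN_CUSTOMER_ID to MAX_CUSTOMER_ID as configured in config.py):
#   CustomerID,AcquisitionSource
#   1,PaidSearch
#   2,Email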
| 22.363636
| 85
| 0.617886
| 73
| 738
| 5.972603
| 0.616438
| 0.137615
| 0.059633
| 0.073395
| 0.119266
| 0.119266
| 0
| 0
| 0
| 0
| 0
| 0.001832
| 0.260163
| 738
| 32
| 86
| 23.0625
| 0.796703
| 0.02981
| 0
| 0
| 1
| 0
| 0.195775
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.125
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c79d2fe84aae88ef213fa559ea2499797887d57
| 959
|
py
|
Python
|
doc/gallery-src/analysis/run_blockMcnpMaterialCard.py
|
celikten/armi
|
4e100dd514a59caa9c502bd5a0967fd77fdaf00e
|
[
"Apache-2.0"
] | 1
|
2021-05-29T16:02:31.000Z
|
2021-05-29T16:02:31.000Z
|
doc/gallery-src/analysis/run_blockMcnpMaterialCard.py
|
celikten/armi
|
4e100dd514a59caa9c502bd5a0967fd77fdaf00e
|
[
"Apache-2.0"
] | null | null | null |
doc/gallery-src/analysis/run_blockMcnpMaterialCard.py
|
celikten/armi
|
4e100dd514a59caa9c502bd5a0967fd77fdaf00e
|
[
"Apache-2.0"
] | null | null | null |
"""
Write MCNP Material Cards
=========================
Here we load a test reactor and write each component of one fuel block out as
MCNP material cards.
Normally, code-specific utility code would belong in a code-specific ARMI
plugin. But in this case, the need for MCNP materials cards is so pervasive
that it made it into the framework.
"""
from armi.reactor.tests import test_reactors
from armi.reactor.flags import Flags
from armi.utils.densityTools import formatMaterialCard
from armi.nucDirectory import nuclideBases as nb
from armi import configure
configure(permissive=True)
_o, r = test_reactors.loadTestReactor()
bFuel = r.core.getBlocks(Flags.FUEL)[0]
for ci, component in enumerate(bFuel, start=1):
ndens = component.getNumberDensities()
# convert nucName (str) keys to nuclideBase keys
ndensByBase = {nb.byName[nucName]: dens for nucName, dens in ndens.items()}
print("".join(formatMaterialCard(ndensByBase, matNum=ci)))
| 31.966667
| 79
| 0.755996
| 136
| 959
| 5.308824
| 0.610294
| 0.055402
| 0.047091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002436
| 0.1439
| 959
| 29
| 80
| 33.068966
| 0.876979
| 0.402503
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.416667
| 0
| 0.416667
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
7c79e12b0a22b9ba1c999ecbf405c389b15998f7
| 6,612
|
py
|
Python
|
life_line_chart/_autogenerate_data.py
|
mustaqimM/life_line_chart
|
a9bbbbdeb5568aa0cc3b3b585337a3d655f4b2d6
|
[
"MIT"
] | null | null | null |
life_line_chart/_autogenerate_data.py
|
mustaqimM/life_line_chart
|
a9bbbbdeb5568aa0cc3b3b585337a3d655f4b2d6
|
[
"MIT"
] | null | null | null |
life_line_chart/_autogenerate_data.py
|
mustaqimM/life_line_chart
|
a9bbbbdeb5568aa0cc3b3b585337a3d655f4b2d6
|
[
"MIT"
] | null | null | null |
import names
import os
import datetime
from random import random
def generate_gedcom_file():
"""generate some gedcom file"""
db = {}
db['n_individuals'] = 0
db['max_individuals'] = 8000
db['n_families'] = 0
db['yougest'] = None
gedcom_content = """
0 HEAD
1 SOUR Gramps
2 VERS 3.3.0
2 NAME Gramps
1 DATE {}
2 TIME 15:35:24
1 SUBM @SUBM@
1 COPR Copyright (c) 2020 Christian Schulze,,,.
1 GEDC
2 VERS 5.5
1 CHAR UTF-8
1 LANG German
""".format(datetime.date.today())
def generate_individual(db, birth_year, sex=None, last_name=None):
if not sex:
sex = 'F' if random() < 0.5 else 'M'
first_name = names.get_first_name(
gender='male' if sex == 'M' else 'female')
if random() < 0.3:
first_name += ' ' + \
names.get_first_name(gender='male' if sex == 'M' else 'female')
if not last_name:
last_name = names.get_last_name()
birth_place = 'Paris' if random() < 0.5 else 'Rome'
death_place = 'Zorge' if random() < 0.5 else 'Bruegge'
db['n_individuals'] += 1
individual_id = '@I{}@'.format(db["n_individuals"])
death_year = birth_year + 40 + int(random()*20)
db[individual_id] = {
'birth': birth_year,
'death': death_year,
'sex': sex,
'last_name': last_name
}
birth_date = '1 JUN {}'.format(birth_year)
death_date = '1 JUN {}'.format(death_year)
if not db['yougest']:
db['yougest'] = individual_id
elif db[db['yougest']]['birth'] < birth_year:
db['yougest'] = individual_id
db[individual_id]['string'] = """0 {individual_id} INDI
1 NAME {first_name} /{last_name}/
1 SEX {sex}
1 BIRT
2 DATE {birth_date}
2 PLAC {birth_place}
1 DEAT
2 DATE {death_date}
2 PLAC {death_place}
""".format(**locals())
return individual_id
def generate_family(db, husband_id, wife_id, children_ids, marriage_year, marriage_place=None):
if not marriage_place:
marriage_place = 'London' if random() < 0.5 else 'Tokio'
db['n_families'] += 1
marriage_date = '1 MAY {}'.format(marriage_year)
family_id = "@F{}@".format(db['n_families'])
db[family_id] = {'string': """0 {family_id} FAM
1 HUSB {husband_id}
1 WIFE {wife_id}
1 MARR
2 DATE {marriage_date}
2 PLAC {marriage_place}
""".format(
**locals()
)}
for child_id in children_ids:
db[family_id]['string'] += "1 CHIL {}\n".format(child_id)
return family_id
def find_by_birth_date(db, from_year, to_year, sex, exclude=[]):
ids = []
for individual_id, data in db.items():
if not individual_id.startswith('@I'):
continue
if 'famc' in data:
if data['birth'] > from_year and data['birth'] < to_year:
if sex == data['sex']:
if individual_id not in exclude:
ids.append(individual_id)
if ids:
return ids[int(random()*len(ids))]
return None
def generate_recursive_family(db, start_year=1000, generations=2, husband_id=None, wife_id=None, siblings=[], max_children=5):
if not husband_id:
if random() < 0.2:
exclude = siblings.copy()
if wife_id:
exclude += [wife_id]
husband_id = find_by_birth_date(
db, start_year, start_year + 10, sex='M', exclude=exclude)
if not husband_id:
husband_id = generate_individual(
db, start_year + int(random()*5), sex='M')
else:
print('reused {}'.format(husband_id))
if not wife_id:
if random() < 10.9:  # always true as written; a threshold like 0.9 appears intended
exclude = siblings.copy() + [husband_id]
wife_id = find_by_birth_date(
db, start_year, start_year + 10, sex='F', exclude=exclude)
if not wife_id:
wife_id = generate_individual(
db, start_year + int(random()*5), sex='F')
else:
print('reused {}'.format(wife_id))
n_children = int((1+random()*(max_children-1)) *
(1 - db['n_individuals'] / db['max_individuals']))
marriage_year = start_year + 20 + int(random()*5)
children_ids = []
for i in range(n_children):
children_ids.append(generate_individual(
db, birth_year=marriage_year + 1 + int(random()*10), last_name=db[husband_id]['last_name']))
family_id = generate_family(
db, husband_id, wife_id, children_ids, marriage_year)
for i in range(n_children):
db[children_ids[i]]['string'] += "1 FAMC "+family_id + '\n'
db[children_ids[i]]['famc'] = family_id
if generations > 0:
generate_recursive_family(
db,
db[children_ids[i]]['birth'],
generations - 1,
children_ids[i] if db[children_ids[i]
]['sex'] == 'M' else None,
children_ids[i] if db[children_ids[i]
]['sex'] == 'F' else None,
children_ids)
db[husband_id]['string'] += "1 FAMS "+family_id + '\n'
db[wife_id]['string'] += "1 FAMS "+family_id + '\n'
generate_recursive_family(db, generations=8, max_children=4)
for k, v in db.items():
if k.startswith('@I'):
gedcom_content += v['string']
for k, v in db.items():
if k.startswith('@F'):
gedcom_content += v['string']
gedcom_content += '0 TRLR\n'
with open(os.path.join(os.path.dirname(__file__), '..', 'tests',
          'autogenerated.ged'), 'w') as gedcom_file:
    gedcom_file.write(gedcom_content)
# generate_gedcom_file()
def generate_individual_images():
from PIL import Image, ImageDraw, ImageFont
def generate_one_image(filename, text, font_size=22, pos=(15, 40), size=(100, 100), color=(160, 160, 160)):
img = Image.new('RGB', size, color=color)
d = ImageDraw.Draw(img)
font = ImageFont.truetype(r'arial.ttf', font_size)
d.text(pos, text, fill=(0, 0, 0), font=font)
img.save(filename)
for i in range(20):
generate_one_image(
'tests/images/individual_I6_image_age_{}.png'.format(
1+i*4
), 'Age {}'.format(
1+i*4,
))
generate_individual_images()
| 35.548387
| 130
| 0.545977
| 850
| 6,612
| 4.035294
| 0.196471
| 0.041691
| 0.02449
| 0.020408
| 0.223032
| 0.18484
| 0.159767
| 0.146939
| 0.146939
| 0.11312
| 0
| 0.029804
| 0.320024
| 6,612
| 185
| 131
| 35.740541
| 0.733096
| 0.007411
| 0
| 0.096386
| 1
| 0
| 0.148849
| 0.006558
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042169
| false
| 0
| 0.03012
| 0
| 0.096386
| 0.012048
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c7e5ef5e8a7277261b9729c9f251391fd2d29dc
| 1,415
|
py
|
Python
|
apps/goods/views_base.py
|
sunwei19910119/DjangoShop
|
188102dc8ef9f4751f4eeeb7574e95c8cc270484
|
[
"MIT"
] | 3
|
2018-08-22T02:41:55.000Z
|
2022-03-03T08:49:38.000Z
|
apps/goods/views_base.py
|
sunwei19910119/DjangoShop
|
188102dc8ef9f4751f4eeeb7574e95c8cc270484
|
[
"MIT"
] | null | null | null |
apps/goods/views_base.py
|
sunwei19910119/DjangoShop
|
188102dc8ef9f4751f4eeeb7574e95c8cc270484
|
[
"MIT"
] | 1
|
2019-10-23T12:24:08.000Z
|
2019-10-23T12:24:08.000Z
|
# encoding: utf-8
from goods.models import Goods
from django.views.generic.base import View
class GoodsListView(View):
def get(self, request):
"""
้่ฟdjango็viewๅฎ็ฐๅๅๅ่กจ้กต
"""
json_list = []
goods = Goods.objects.all()[:10]
# for good in goods:
# json_dict = {}
# json_dict["name"] = good.name
# json_dict["category"] = good.category.name
# json_dict["market_price"] = good.market_price
# json_dict["add_time"] = good.add_time
# json_list.append(json_dict)
# from django.http import HttpResponse
# import json
# return HttpResponse(json.dumps(json_list),content_type="application/json")
from django.forms.models import model_to_dict
for good in goods:
json_dict = model_to_dict(good)
json_list.append(json_dict)
import json
from django.core import serializers
json_data = serializers.serialize('json', goods)
json_data = json.loads(json_data)
from django.http import HttpResponse, JsonResponse
# JsonResponse just does the json.dumps and sets the content_type for you
# return HttpResponse(json.dumps(json_data), content_type="application/json")
# with the json.loads above commented out, the statement below works as-is
# return HttpResponse(json_data, content_type="application/json")
return JsonResponse(json_data, safe=False)
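# For reference, serializers.serialize('json', goods) produces Django's
# documented list-of-objects shape (the model label below is assumed from
# this app's naming):
#   [{"model": "goods.goods", "pk": 1, "fields": {"name": "...", ...}}, ...]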
| 32.159091
| 85
| 0.633922
| 159
| 1,415
| 5.45283
| 0.352201
| 0.073818
| 0.076125
| 0.089965
| 0.320646
| 0.129181
| 0
| 0
| 0
| 0
| 0
| 0.002913
| 0.272085
| 1,415
| 43
| 86
| 32.906977
| 0.838835
| 0.424735
| 0
| 0
| 0
| 0
| 0.005175
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.375
| 0
| 0.5625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
7c81cc51df1ab53c03a469cdc7c5c3c8cd7e2980
| 508
|
py
|
Python
|
url_shortener/src/__init__.py
|
Andrelpoj/hire.me
|
79428e2094a6b56e762a7f958e1b75f395f59cef
|
[
"Apache-2.0"
] | null | null | null |
url_shortener/src/__init__.py
|
Andrelpoj/hire.me
|
79428e2094a6b56e762a7f958e1b75f395f59cef
|
[
"Apache-2.0"
] | null | null | null |
url_shortener/src/__init__.py
|
Andrelpoj/hire.me
|
79428e2094a6b56e762a7f958e1b75f395f59cef
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask
from .extensions import db
from .routes import short
from . import config
def create_app():
""" Creates Flask App, connect to Database and register Blueprint of routes"""
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = config.DATABASE_CONNECTION_URI
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.app_context().push()
db.init_app(app)
db.create_all()
app.register_blueprint(short)
return app
| 28.222222
| 83
| 0.690945
| 64
| 508
| 5.25
| 0.484375
| 0.10119
| 0.113095
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222441
| 508
| 18
| 84
| 28.222222
| 0.850633
| 0.139764
| 0
| 0
| 0
| 0
| 0.128019
| 0.128019
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.307692
| 0
| 0.461538
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
7c82fafc5019f5e066e5d9af9ec1a1742645a993
| 27,180
|
py
|
Python
|
polyaxon_cli/cli/experiment.py
|
tiagopms/polyaxon-cli
|
eb13e3b8389ccf069a421a4dabc87aaa506ab61c
|
[
"MIT"
] | null | null | null |
polyaxon_cli/cli/experiment.py
|
tiagopms/polyaxon-cli
|
eb13e3b8389ccf069a421a4dabc87aaa506ab61c
|
[
"MIT"
] | null | null | null |
polyaxon_cli/cli/experiment.py
|
tiagopms/polyaxon-cli
|
eb13e3b8389ccf069a421a4dabc87aaa506ab61c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import sys
import click
import rhea
from polyaxon_cli.cli.getters.experiment import (
get_experiment_job_or_local,
get_project_experiment_or_local
)
from polyaxon_cli.cli.upload import upload
from polyaxon_cli.client import PolyaxonClient
from polyaxon_cli.client.exceptions import PolyaxonHTTPError, PolyaxonShouldExitError
from polyaxon_cli.logger import clean_outputs
from polyaxon_cli.managers.experiment import ExperimentManager
from polyaxon_cli.managers.experiment_job import ExperimentJobManager
from polyaxon_cli.utils import cache
from polyaxon_cli.utils.formatting import (
Printer,
dict_tabulate,
get_meta_response,
get_resources,
list_dicts_to_tabulate
)
from polyaxon_cli.utils.log_handler import get_logs_handler
from polyaxon_cli.utils.validation import validate_tags
from polyaxon_client.exceptions import PolyaxonClientException
def get_experiment_details(experiment): # pylint:disable=redefined-outer-name
if experiment.description:
Printer.print_header("Experiment description:")
click.echo('{}\n'.format(experiment.description))
if experiment.resources:
get_resources(experiment.resources.to_dict(), header="Experiment resources:")
if experiment.declarations:
Printer.print_header("Experiment declarations:")
dict_tabulate(experiment.declarations)
if experiment.last_metric:
Printer.print_header("Experiment last metrics:")
dict_tabulate(experiment.last_metric)
response = experiment.to_light_dict(
humanize_values=True,
exclude_attrs=[
'uuid', 'config', 'project', 'experiments', 'description',
'declarations', 'last_metric', 'resources', 'jobs', 'run_env'
])
Printer.print_header("Experiment info:")
dict_tabulate(Printer.add_status_color(response))
@click.group()
@click.option('--project', '-p', type=str, help="The project name, e.g. 'mnist' or 'adam/mnist'.")
@click.option('--experiment', '-xp', type=int, help="The experiment id number.")
@click.pass_context
@clean_outputs
def experiment(ctx, project, experiment): # pylint:disable=redefined-outer-name
"""Commands for experiments."""
ctx.obj = ctx.obj or {}
ctx.obj['project'] = project
ctx.obj['experiment'] = experiment
@experiment.command()
@click.option('--job', '-j', type=int, help="The job id.")
@click.pass_context
@clean_outputs
def get(ctx, job):
"""Get experiment or experiment job.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples for getting an experiment:
\b
```bash
$ polyaxon experiment get # if experiment is cached
```
\b
```bash
$ polyaxon experiment --experiment=1 get
```
\b
```bash
$ polyaxon experiment -xp 1 --project=cats-vs-dogs get
```
\b
```bash
$ polyaxon experiment -xp 1 -p alain/cats-vs-dogs get
```
Examples for getting an experiment job:
\b
```bash
$ polyaxon experiment get -j 1 # if experiment is cached
```
\b
```bash
$ polyaxon experiment --experiment=1 get --job=10
```
\b
```bash
$ polyaxon experiment -xp 1 --project=cats-vs-dogs get -j 2
```
\b
```bash
$ polyaxon experiment -xp 1 -p alain/cats-vs-dogs get -j 2
```
"""
def get_experiment():
try:
response = PolyaxonClient().experiment.get_experiment(user, project_name, _experiment)
cache.cache(config_manager=ExperimentManager, response=response)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not load experiment `{}` info.'.format(_experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
get_experiment_details(response)
def get_experiment_job():
try:
response = PolyaxonClient().experiment_job.get_job(user,
project_name,
_experiment,
_job)
cache.cache(config_manager=ExperimentJobManager, response=response)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get job `{}`.'.format(_job))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
if response.resources:
get_resources(response.resources.to_dict(), header="Job resources:")
response = Printer.add_status_color(response.to_light_dict(
humanize_values=True,
exclude_attrs=['uuid', 'definition', 'experiment', 'unique_name', 'resources']
))
Printer.print_header("Job info:")
dict_tabulate(response)
user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),
ctx.obj.get('experiment'))
if job:
_job = get_experiment_job_or_local(job)
get_experiment_job()
else:
get_experiment()
@experiment.command()
@click.pass_context
@clean_outputs
def delete(ctx):
"""Delete experiment.
Uses [Caching](/references/polyaxon-cli/#caching)
Example:
\b
```bash
$ polyaxon experiment delete
```
"""
user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),
ctx.obj.get('experiment'))
if not click.confirm("Are sure you want to delete experiment `{}`".format(_experiment)):
click.echo('Existing without deleting experiment.')
sys.exit(1)
try:
response = PolyaxonClient().experiment.delete_experiment(
user, project_name, _experiment)
# Purge caching
ExperimentManager.purge()
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not delete experiment `{}`.'.format(_experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
if response.status_code == 204:
Printer.print_success("Experiment `{}` was delete successfully".format(_experiment))
@experiment.command()
@click.option('--name', type=str,
help='Name of the experiment, must be unique within the project, could be none.')
@click.option('--description', type=str, help='Description of the experiment.')
@click.option('--tags', type=str, help='Tags of the experiment, comma separated values.')
@click.pass_context
@clean_outputs
def update(ctx, name, description, tags):
"""Update experiment.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon experiment -xp 2 update --description="new description for my experiments"
```
\b
```bash
$ polyaxon experiment -xp 2 update --tags="foo, bar" --name="unique-name"
```
"""
user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),
ctx.obj.get('experiment'))
update_dict = {}
if name:
update_dict['name'] = name
if description:
update_dict['description'] = description
tags = validate_tags(tags)
if tags:
update_dict['tags'] = tags
if not update_dict:
Printer.print_warning('No argument was provided to update the experiment.')
sys.exit(0)
try:
response = PolyaxonClient().experiment.update_experiment(
user, project_name, _experiment, update_dict)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not update experiment `{}`.'.format(_experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
Printer.print_success("Experiment updated.")
get_experiment_details(response)
@experiment.command()
@click.option('--yes', '-y', is_flag=True, default=False,
help="Automatic yes to prompts. "
"Assume \"yes\" as answer to all prompts and run non-interactively.")
@click.pass_context
@clean_outputs
def stop(ctx, yes):
"""Stop experiment.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon experiment stop
```
\b
```bash
$ polyaxon experiment -xp 2 stop
```
"""
user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),
ctx.obj.get('experiment'))
if not yes and not click.confirm("Are you sure you want to stop "
"experiment `{}`".format(_experiment)):
click.echo('Exiting without stopping experiment.')
sys.exit(0)
try:
PolyaxonClient().experiment.stop(user, project_name, _experiment)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not stop experiment `{}`.'.format(_experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
Printer.print_success("Experiment is being stopped.")
@experiment.command()
@click.option('--copy', '-c', is_flag=True, default=False,
help="To copy the experiment before restarting.")
@click.option('--file', '-f', multiple=True, type=click.Path(exists=True),
help="The polyaxon files to update with.")
@click.option('-u', is_flag=True, default=False,
help="To upload the repo before restarting.")
@click.pass_context
@clean_outputs
def restart(ctx, copy, file, u): # pylint:disable=redefined-builtin
"""Restart experiment.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon experiment --experiment=1 restart
```
"""
config = None
update_code = None
if file:
config = rhea.read(file)
# Check if we need to upload
if u:
ctx.invoke(upload, sync=False)
update_code = True
user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),
ctx.obj.get('experiment'))
try:
if copy:
response = PolyaxonClient().experiment.copy(
user, project_name, _experiment, config=config, update_code=update_code)
Printer.print_success('Experiment was copied with id {}'.format(response.id))
else:
response = PolyaxonClient().experiment.restart(
user, project_name, _experiment, config=config, update_code=update_code)
Printer.print_success('Experiment was restarted with id {}'.format(response.id))
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not restart experiment `{}`.'.format(_experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
@experiment.command()
@click.option('--file', '-f', multiple=True, type=click.Path(exists=True),
help="The polyaxon files to update with.")
@click.option('-u', is_flag=True, default=False,
help="To upload the repo before resuming.")
@click.pass_context
@clean_outputs
def resume(ctx, file, u): # pylint:disable=redefined-builtin
"""Resume experiment.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon experiment --experiment=1 resume
```
"""
config = None
update_code = None
if file:
config = rhea.read(file)
# Check if we need to upload
if u:
ctx.invoke(upload, sync=False)
update_code = True
user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),
ctx.obj.get('experiment'))
try:
response = PolyaxonClient().experiment.resume(
user, project_name, _experiment, config=config, update_code=update_code)
Printer.print_success('Experiment was resumed with id {}'.format(response.id))
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not resume experiment `{}`.'.format(_experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
@experiment.command()
@click.option('--page', type=int, help="To paginate through the list of jobs.")
@click.pass_context
@clean_outputs
def jobs(ctx, page):
"""List jobs for experiment.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon experiment --experiment=1 jobs
```
"""
user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),
ctx.obj.get('experiment'))
page = page or 1
try:
response = PolyaxonClient().experiment.list_jobs(
user, project_name, _experiment, page=page)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get jobs for experiment `{}`.'.format(_experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
meta = get_meta_response(response)
if meta:
Printer.print_header('Jobs for experiment `{}`.'.format(_experiment))
Printer.print_header('Navigation:')
dict_tabulate(meta)
else:
Printer.print_header('No jobs found for experiment `{}`.'.format(_experiment))
objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True))
for o in response['results']]
objects = list_dicts_to_tabulate(objects)
if objects:
Printer.print_header("Jobs:")
objects.pop('experiment', None)
dict_tabulate(objects, is_list_dict=True)
@experiment.command()
@click.option('--job', '-j', type=int, help="The job id.")
@click.option('--page', type=int, help="To paginate through the list of statuses.")
@click.pass_context
@clean_outputs
def statuses(ctx, job, page):
"""Get experiment or experiment job statuses.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples getting experiment statuses:
\b
```bash
$ polyaxon experiment statuses
```
\b
```bash
$ polyaxon experiment -xp 1 statuses
```
Examples getting experiment job statuses:
\b
```bash
$ polyaxon experiment statuses -j 3
```
\b
```bash
$ polyaxon experiment -xp 1 statuses --job 1
```
"""
def get_experiment_statuses():
try:
response = PolyaxonClient().experiment.get_statuses(
user, project_name, _experiment, page=page)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get status for experiment `{}`.'.format(_experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
meta = get_meta_response(response)
if meta:
Printer.print_header('Statuses for experiment `{}`.'.format(_experiment))
Printer.print_header('Navigation:')
dict_tabulate(meta)
else:
Printer.print_header('No statuses found for experiment `{}`.'.format(_experiment))
objects = list_dicts_to_tabulate(
[Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='status')
for o in response['results']])
if objects:
Printer.print_header("Statuses:")
objects.pop('experiment', None)
dict_tabulate(objects, is_list_dict=True)
def get_experiment_job_statuses():
try:
response = PolyaxonClient().experiment_job.get_statuses(user,
project_name,
_experiment,
_job,
page=page)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get status for job `{}`.'.format(job))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
meta = get_meta_response(response)
if meta:
Printer.print_header('Statuses for Job `{}`.'.format(_job))
Printer.print_header('Navigation:')
dict_tabulate(meta)
else:
Printer.print_header('No statuses found for job `{}`.'.format(_job))
objects = list_dicts_to_tabulate(
[Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='status')
for o in response['results']])
if objects:
Printer.print_header("Statuses:")
objects.pop('job', None)
dict_tabulate(objects, is_list_dict=True)
page = page or 1
user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),
ctx.obj.get('experiment'))
if job:
_job = get_experiment_job_or_local(job)
get_experiment_job_statuses()
else:
get_experiment_statuses()
@experiment.command()
@click.option('--job', '-j', type=int, help="The job id.")
@click.option('--gpu', '-g', is_flag=True, help="List experiment GPU resources.")
@click.pass_context
@clean_outputs
def resources(ctx, job, gpu):
"""Get experiment or experiment job resources.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples for getting experiment resources:
\b
```bash
$ polyaxon experiment -xp 19 resources
```
For GPU resources
\b
```bash
$ polyaxon experiment -xp 19 resources --gpu
```
Examples for getting experiment job resources:
\b
```bash
$ polyaxon experiment -xp 19 resources -j 1
```
For GPU resources
\b
```bash
$ polyaxon experiment -xp 19 resources -j 1 --gpu
```
"""
def get_experiment_resources():
try:
message_handler = Printer.gpu_resources if gpu else Printer.resources
PolyaxonClient().experiment.resources(
user, project_name, _experiment, message_handler=message_handler)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get resources for experiment `{}`.'.format(_experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
def get_experiment_job_resources():
try:
message_handler = Printer.gpu_resources if gpu else Printer.resources
PolyaxonClient().experiment_job.resources(user,
project_name,
_experiment,
_job,
message_handler=message_handler)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get resources for job `{}`.'.format(_job))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),
ctx.obj.get('experiment'))
if job:
_job = get_experiment_job_or_local(job)
get_experiment_job_resources()
else:
get_experiment_resources()
@experiment.command()
@click.option('--job', '-j', type=int, help="The job id.")
@click.option('--past', '-p', is_flag=True, help="Show the past logs.")
@click.option('--follow', '-f', is_flag=True, default=False,
help="Stream logs after showing past logs.")
@click.option('--hide_time', is_flag=True, default=False,
help="Whether or not to hide timestamps from the log stream.")
@click.pass_context
@clean_outputs
def logs(ctx, job, past, follow, hide_time):
"""Get experiment or experiment job logs.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples for getting experiment logs:
\b
```bash
$ polyaxon experiment logs
```
\b
```bash
$ polyaxon experiment -xp 10 -p mnist logs
```
Examples for getting experiment job logs:
\b
```bash
$ polyaxon experiment -xp 1 -j 1 logs
```
"""
def get_experiment_logs():
if past:
try:
response = PolyaxonClient().experiment.logs(
user, project_name, _experiment, stream=False)
get_logs_handler(handle_job_info=True,
show_timestamp=not hide_time,
stream=False)(response.content.decode().split('\n'))
print()
if not follow:
return
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
if not follow:
Printer.print_error(
'Could not get logs for experiment `{}`.'.format(_experiment))
Printer.print_error(
'Error message `{}`.'.format(e))
sys.exit(1)
try:
PolyaxonClient().experiment.logs(
user,
project_name,
_experiment,
message_handler=get_logs_handler(handle_job_info=True,
show_timestamp=not hide_time))
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get logs for experiment `{}`.'.format(_experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
def get_experiment_job_logs():
if past:
try:
response = PolyaxonClient().experiment_job.logs(
user,
project_name,
_experiment,
_job,
stream=False)
get_logs_handler(handle_job_info=True,
show_timestamp=not hide_time,
stream=False)(response.content.decode().split('\n'))
print()
if not follow:
return
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
if not follow:
Printer.print_error(
'Could not get logs for experiment `{}`.'.format(_experiment))
Printer.print_error(
'Error message `{}`.'.format(e))
sys.exit(1)
try:
PolyaxonClient().experiment_job.logs(
user,
project_name,
_experiment,
_job,
message_handler=get_logs_handler(handle_job_info=True,
show_timestamp=not hide_time))
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get logs for job `{}`.'.format(_job))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),
ctx.obj.get('experiment'))
if job:
_job = get_experiment_job_or_local(job)
get_experiment_job_logs()
else:
get_experiment_logs()
@experiment.command()
@click.pass_context
@clean_outputs
def outputs(ctx):
"""Download outputs for experiment.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon experiment -xp 1 outputs
```
"""
user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),
ctx.obj.get('experiment'))
try:
PolyaxonClient().experiment.download_outputs(user, project_name, _experiment)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not download outputs for experiment `{}`.'.format(_experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
Printer.print_success('Files downloaded.')
@experiment.command()
@click.pass_context
@clean_outputs
def bookmark(ctx):
"""Bookmark experiment.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon experiment bookmark
```
\b
```bash
$ polyaxon experiment -xp 2 bookmark
```
"""
user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),
ctx.obj.get('experiment'))
try:
PolyaxonClient().experiment.bookmark(user, project_name, _experiment)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not bookmark experiment `{}`.'.format(_experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
Printer.print_success("Experiment is bookmarked.")
@experiment.command()
@click.pass_context
@clean_outputs
def unbookmark(ctx):
"""Unbookmark experiment.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon experiment unbookmark
```
\b
```bash
$ polyaxon experiment -xp 2 unbookmark
```
"""
user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),
ctx.obj.get('experiment'))
try:
PolyaxonClient().experiment.unbookmark(user, project_name, _experiment)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not unbookmark experiment `{}`.'.format(_experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
Printer.print_success("Experiment is unbookmarked.")
| 33.84807
| 99
| 0.606659
| 2,827
| 27,180
| 5.659356
| 0.089848
| 0.048753
| 0.040378
| 0.051566
| 0.768986
| 0.705607
| 0.636852
| 0.606163
| 0.585349
| 0.570286
| 0
| 0.003379
| 0.281347
| 27,180
| 802
| 100
| 33.890274
| 0.815697
| 0.134106
| 0
| 0.587473
| 0
| 0
| 0.14221
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049676
| false
| 0.030238
| 0.034557
| 0
| 0.088553
| 0.146868
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c85f2097ce6518402e3aa24b38cc365cc5ffeaa
| 4,981
|
py
|
Python
|
Whats Cooking/KaggleCookingComparison.py
|
rupakc/Kaggle-Compendium
|
61634ba742f9a0239f2d1e45973c4bb477ac6306
|
[
"MIT"
] | 17
|
2018-01-11T05:49:06.000Z
|
2021-08-22T16:50:10.000Z
|
Whats Cooking/KaggleCookingComparison.py
|
Tuanlase02874/Machine-Learning-Kaggle
|
c31651acd8f2407d8b60774e843a2527ce19b013
|
[
"MIT"
] | null | null | null |
Whats Cooking/KaggleCookingComparison.py
|
Tuanlase02874/Machine-Learning-Kaggle
|
c31651acd8f2407d8b60774e843a2527ce19b013
|
[
"MIT"
] | 8
|
2017-11-27T06:58:50.000Z
|
2021-08-22T16:50:13.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 26 13:20:45 2015
Code for Kaggle What's Cooking Competition
It uses the following classifiers with tf-idf, hashing-vector and bag-of-words features
1. Adaboost
2. Extratrees
3. Bagging
4. Random Forests
@author: Rupak Chakraborty
"""
import numpy as np
import time
import json
import ClassificationUtils
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn import metrics
# Create the feature extractors
bag_of_words = CountVectorizer(stop_words='english')
tfidf = TfidfVectorizer(stop_words='english')
hashvec = HashingVectorizer(stop_words='english')
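# Rough intuition for the three feature spaces (illustrative, not computed from this dataset):
# CountVectorizer produces raw token counts, TfidfVectorizer rescales those counts by inverse
# document frequency, and HashingVectorizer hashes tokens into a fixed-width sparse vector
# without storing a vocabulary, e.g.:
#   bag_of_words.fit_transform(["salt pepper", "salt"])  # -> 2x2 sparse count matrix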
# Create the Classifier objects
adaboost = AdaBoostClassifier()
randomforest = RandomForestClassifier()
extratrees = ExtraTreesClassifier()
bagging = BaggingClassifier()
filepath = "train.json"
f = open(filepath,"r")
content = f.read()
jsonData = json.loads(content)
cuisine_set = set([])
ingredient_set = set([])
cuisine_map = {}
cuisine_numerical_map = {}
ingredient_numerical_map = {}
ingredient_map = {}
ingredient_list = list([])
c = 0
print "Size of the data set : ", len(jsonData)
print "Starting Loading of Data Set...."
start = time.time()
for recipe in jsonData:
if "cuisine" in recipe:
s = ""
if recipe["cuisine"] in cuisine_set:
cuisine_map[recipe["cuisine"]] = cuisine_map[recipe["cuisine"]] + 1
else:
cuisine_map[recipe["cuisine"]] = 1
cuisine_set.add(recipe["cuisine"])
for ingredient in recipe["ingredients"]:
if ingredient in ingredient_set:
ingredient_map[ingredient] = ingredient_map[ingredient] + 1
else:
ingredient_map[ingredient] = 1
ingredient_set.add(ingredient)
s = s + " " + ingredient
ingredient_list.append(s)
end = time.time()
print "Time Taken to Load the Dataset : ",end-start
for cuisine in cuisine_set:
cuisine_numerical_map[cuisine] = c
c = c+1
c = 0
for ingredient in ingredient_set:
ingredient_numerical_map[ingredient] = c
c = c+1
print "Starting Feature Extracting ......"
start = time.time()
train_labels = np.zeros(len(ingredient_list))
train_data_tfidf = tfidf.fit_transform(ingredient_list)
train_data_hash = hashvec.fit_transform(ingredient_list)
train_data_bag = bag_of_words.fit_transform(ingredient_list)
c = 0
for recipe in jsonData:
if "cuisine" in recipe:
train_labels[c] = cuisine_numerical_map[recipe["cuisine"]]
c = c+1
end = time.time()
print "Time Taken to Train Extract Different Features : ", end-start
test_labels = train_labels[1:30000]
test_data_tfidf = tfidf.transform(ingredient_list[1:30000])
test_data_hash = hashvec.transform(ingredient_list[1:30000])
test_data_bag = bag_of_words.transform(ingredient_list[1:30000])
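# Note: this "test" split is simply the first 30,000 training recipes, so the
# accuracy figures printed below measure fit on already-seen data and are optimistic.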
print "Starting Training of Models for Hash Vectorizer Feature....."
start = time.time()
adaboost.fit(train_data_bag,train_labels)
randomforest.fit(train_data_bag,train_labels)
extratrees.fit(train_data_bag,train_labels)
bagging.fit(train_data_bag,train_labels)
end=time.time()
print "Time Taken to train all Ensemble Models : ", end-start
print "Starting Prediction of Test Labels ...."
start = time.time()
ada_predict = adaboost.predict(test_data_bag)
rf_predict = randomforest.predict(test_data_bag)
extree_predict = extratrees.predict(test_data_bag)
bagging_predict = bagging.predict(test_data_bag)
end = time.time()
print "Time Taken to Test the models : ", end-start
print "Accuracy of AdaBoost Algorithm : ", metrics.accuracy_score(test_labels,ada_predict)
print "Accuracy of Random Forests : ", metrics.accuracy_score(test_labels,rf_predict)
print "Accuracy of Extra Trees : ", metrics.accuracy_score(test_labels,extree_predict)
print "Accuracy of Bagging : ", metrics.accuracy_score(test_labels,bagging_predict)
# Saving the bag-of-words vectorizer and the trained classifiers
ClassificationUtils.save_classifier("ada_bag_cook.pickle",adaboost)
ClassificationUtils.save_classifier("rf_bag_cook.pickle",randomforest)
ClassificationUtils.save_classifier("extree_bag_cook.pickle",extratrees)
ClassificationUtils.save_classifier("bagging_bag_cook.pickle",bagging)
ClassificationUtils.save_classifier("bag_of_words.pickle",tfidf)
def printIngredientDistribution():
print "----------- Distribution of the Recipe Ingredients ------------------"
for key in ingredient_map.keys():
print key, " : " ,ingredient_map[key]
def printCuisineDistribution():
print "----------- Distribution of the Cuisines ------------------"
for key in cuisine_map.keys():
print key, " : " ,cuisine_map[key]
| 32.344156
| 90
| 0.739611
| 635
| 4,981
| 5.607874
| 0.233071
| 0.019657
| 0.038753
| 0.028082
| 0.251896
| 0.155013
| 0.074136
| 0.038192
| 0
| 0
| 0
| 0.012103
| 0.153985
| 4,981
| 153
| 91
| 32.555556
| 0.832938
| 0.024493
| 0
| 0.183486
| 0
| 0
| 0.171447
| 0.009778
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.110092
| null | null | 0.174312
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c88b8dca0946deb62b53070c85ee8a8bd47974e
| 845
|
py
|
Python
|
initial_load.py
|
hongyuanChrisLi/RealEstateDBConvert
|
0fd04f5213ff3fd3548db3f322828bd80cf41791
|
[
"Apache-2.0"
] | null | null | null |
initial_load.py
|
hongyuanChrisLi/RealEstateDBConvert
|
0fd04f5213ff3fd3548db3f322828bd80cf41791
|
[
"Apache-2.0"
] | null | null | null |
initial_load.py
|
hongyuanChrisLi/RealEstateDBConvert
|
0fd04f5213ff3fd3548db3f322828bd80cf41791
|
[
"Apache-2.0"
] | null | null | null |
from mysql_dao.select_dao import SelectDao as MysqlSelectDao
from postgres_dao.ddl_dao import DdlDao
from postgres_dao.dml_dao import DmlDao as PsqlDmlDao
psql_ddl_dao = DdlDao()
mysql_select_dao = MysqlSelectDao()
psql_dml_dao = PsqlDmlDao()
psql_ddl_dao.create_tables()
county_data = mysql_select_dao.select_all_counties()
psql_dml_dao.insert_county(county_data)
city_data = mysql_select_dao.select_all_cities()
psql_dml_dao.insert_city(city_data)
zipcode_data = mysql_select_dao.select_all_zipcodes()
psql_dml_dao.insert_zipcode(zipcode_data)
data = mysql_select_dao.select_full_addr_month_rpt()
psql_dml_dao.trunc_addr_month_rpt()
psql_dml_dao.insert_addr_month_rpt(data)
data = mysql_select_dao.select_full_mls_daily_rpt()
psql_dml_dao.trunc_mls_rpt()
psql_dml_dao.insert_mls_rpt(data)
mysql_select_dao.close()
psql_dml_dao.close()
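# The load order above (counties -> cities -> zipcodes -> reports) presumably mirrors
# foreign-key dependencies in the Postgres schema; reordering the inserts would likely
# violate referential integrity.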
| 28.166667
| 60
| 0.857988
| 141
| 845
| 4.602837
| 0.241135
| 0.09245
| 0.138675
| 0.16641
| 0.365177
| 0.291217
| 0.098613
| 0
| 0
| 0
| 0
| 0
| 0.068639
| 845
| 29
| 61
| 29.137931
| 0.824651
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c8e9965cc893f149c68d0938c7cdd288fb5e3a7
| 980
|
py
|
Python
|
src/urh/ui/delegates/CheckBoxDelegate.py
|
awesome-archive/urh
|
c8c3aabc9d637ca660d8c72c3d8372055e0f3ec7
|
[
"Apache-2.0"
] | 1
|
2017-06-21T02:37:16.000Z
|
2017-06-21T02:37:16.000Z
|
src/urh/ui/delegates/CheckBoxDelegate.py
|
dspmandavid/urh
|
30643c1a68634b1c97eb9989485a4e96a3b038ae
|
[
"Apache-2.0"
] | null | null | null |
src/urh/ui/delegates/CheckBoxDelegate.py
|
dspmandavid/urh
|
30643c1a68634b1c97eb9989485a4e96a3b038ae
|
[
"Apache-2.0"
] | null | null | null |
from PyQt5.QtCore import QModelIndex, QAbstractItemModel, Qt, pyqtSlot
from PyQt5.QtWidgets import QItemDelegate, QWidget, QStyleOptionViewItem, QCheckBox
class CheckBoxDelegate(QItemDelegate):
def __init__(self, parent=None):
super().__init__(parent)
self.enabled = True
def createEditor(self, parent: QWidget, option: QStyleOptionViewItem, index: QModelIndex):
editor = QCheckBox(parent)
editor.stateChanged.connect(self.stateChanged)
return editor
def setEditorData(self, editor: QCheckBox, index: QModelIndex):
editor.blockSignals(True)
editor.setChecked(index.model().data(index))
self.enabled = editor.isChecked()
editor.blockSignals(False)
def setModelData(self, editor: QCheckBox, model: QAbstractItemModel, index: QModelIndex):
model.setData(index, editor.isChecked(), Qt.EditRole)
@pyqtSlot()
def stateChanged(self):
self.commitData.emit(self.sender())
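# Minimal usage sketch (an assumption, not part of this module): attach the delegate
# to a boolean column of a view so its cells are edited via checkboxes, e.g.:
#   table_view = QTableView()
#   table_view.setItemDelegateForColumn(0, CheckBoxDelegate(table_view))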
| 37.692308
| 94
| 0.715306
| 97
| 980
| 7.14433
| 0.443299
| 0.069264
| 0.063492
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002503
| 0.184694
| 980
| 26
| 95
| 37.692308
| 0.864831
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.1
| 0
| 0.45
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c9666a6d0704c6c5a1d15ed10e9ce79d7670676
| 3,215
|
py
|
Python
|
project/server/models.py
|
mvlima/flask-jwt-auth
|
6cb210b50888b1e9a41ea9e63a80eafcbe436560
|
[
"MIT"
] | null | null | null |
project/server/models.py
|
mvlima/flask-jwt-auth
|
6cb210b50888b1e9a41ea9e63a80eafcbe436560
|
[
"MIT"
] | null | null | null |
project/server/models.py
|
mvlima/flask-jwt-auth
|
6cb210b50888b1e9a41ea9e63a80eafcbe436560
|
[
"MIT"
] | null | null | null |
# project/server/models.py
import jwt
import datetime
from project.server import app, db, bcrypt
class User(db.Model):
""" User Model for storing user related details """
__tablename__ = "users"
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
username = db.Column(db.String(255), unique=True, nullable=False)
email = db.Column(db.String(255), unique=True, nullable=False)
password = db.Column(db.String(255), nullable=False)
name = db.Column(db.String(255), nullable=False)
age = db.Column(db.Integer, nullable=False)
address = db.Column(db.String(255), nullable=False)
registered_on = db.Column(db.DateTime, nullable=False)
admin = db.Column(db.Boolean, nullable=False, default=False)
def __init__(self, email, username, password, name, age, address, admin=False):
self.email = email
self.username = username
self.password = bcrypt.generate_password_hash(
password, app.config.get('BCRYPT_LOG_ROUNDS')
).decode()
self.name = name
self.age = age
self.address = address
self.registered_on = datetime.datetime.now()
self.admin = admin
def encode_auth_token(self, user_id):
"""
Generates the Auth Token
:return: string
"""
try:
payload = {
'exp': datetime.datetime.utcnow() + datetime.timedelta(days=0, seconds=5),
'iat': datetime.datetime.utcnow(),
'sub': user_id
}
return jwt.encode(
payload,
app.config.get('SECRET_KEY'),
algorithm='HS256'
)
except Exception as e:
return e
@staticmethod
def decode_auth_token(auth_token):
"""
Validates the auth token
:param auth_token:
:return: integer|string
"""
try:
payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'))
is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)
if is_blacklisted_token:
return 'Token blacklisted. Please log in again.'
else:
return payload['sub']
except jwt.ExpiredSignatureError:
return 'Signature expired. Please log in again.'
except jwt.InvalidTokenError:
return 'Invalid token. Please log in again.'
class BlacklistToken(db.Model):
"""
Token Model for storing JWT tokens
"""
__tablename__ = 'blacklist_tokens'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
token = db.Column(db.String(500), unique=True, nullable=False)
blacklisted_on = db.Column(db.DateTime, nullable=False)
def __init__(self, token):
self.token = token
self.blacklisted_on = datetime.datetime.now()
def __repr__(self):
return '<id: {} token: {}>'.format(self.id, self.token)
@staticmethod
def check_blacklist(auth_token):
# Check whether auth token has been blacklisted
res = BlacklistToken.query.filter_by(token=str(auth_token)).first()
if res:
return True
else:
return False
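# Minimal usage sketch (assumes an application context and that db.create_all() has run):
#   user = User(email='a@example.com', username='a', password='secret',
#               name='A', age=30, address='1 Main St')
#   db.session.add(user)
#   db.session.commit()
#   token = user.encode_auth_token(user.id)
#   User.decode_auth_token(token)  # -> user.id, or an error string on failure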
| 32.806122
| 90
| 0.612753
| 368
| 3,215
| 5.211957
| 0.296196
| 0.050052
| 0.062565
| 0.04171
| 0.18561
| 0.163712
| 0.163712
| 0.095933
| 0.095933
| 0.052138
| 0
| 0.009961
| 0.281804
| 3,215
| 97
| 91
| 33.14433
| 0.820702
| 0.080871
| 0
| 0.115942
| 0
| 0
| 0.07058
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0.057971
| 0.043478
| 0.014493
| 0.492754
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
7c98495a22a6d3d8755497c989624d8a5c427192
| 60,943
|
py
|
Python
|
elastalert/alerts.py
|
dekhrekh/elastalert
|
0c1ce30302c575bd0be404582cd452f38c01c774
|
[
"Apache-2.0"
] | null | null | null |
elastalert/alerts.py
|
dekhrekh/elastalert
|
0c1ce30302c575bd0be404582cd452f38c01c774
|
[
"Apache-2.0"
] | null | null | null |
elastalert/alerts.py
|
dekhrekh/elastalert
|
0c1ce30302c575bd0be404582cd452f38c01c774
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import copy
import datetime
import json
import logging
import subprocess
import sys
import warnings
from email.mime.text import MIMEText
from email.utils import formatdate
from smtplib import SMTP
from smtplib import SMTP_SSL
from smtplib import SMTPAuthenticationError
from smtplib import SMTPException
from socket import error
import boto3
import requests
import stomp
from exotel import Exotel
from jira.client import JIRA
from jira.exceptions import JIRAError
from requests.exceptions import RequestException
from staticconf.loader import yaml_loader
from texttable import Texttable
from twilio.base.exceptions import TwilioRestException
from twilio.rest import Client as TwilioClient
from util import EAException
from util import elastalert_logger
from util import lookup_es_key
from util import pretty_ts
from util import ts_now
from util import ts_to_dt
class DateTimeEncoder(json.JSONEncoder):
def default(self, obj):
if hasattr(obj, 'isoformat'):
return obj.isoformat()
else:
return json.JSONEncoder.default(self, obj)
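# Example: this encoder lets matches containing datetimes serialize cleanly, e.g.
#   json.dumps({'ts': datetime.datetime(2017, 1, 1)}, cls=DateTimeEncoder)
# returns '{"ts": "2017-01-01T00:00:00"}'.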
class BasicMatchString(object):
""" Creates a string containing fields in match for the given rule. """
def __init__(self, rule, match):
self.rule = rule
self.match = match
def _ensure_new_line(self):
while self.text[-2:] != '\n\n':
self.text += '\n'
def _add_custom_alert_text(self):
missing = '<MISSING VALUE>'
alert_text = unicode(self.rule.get('alert_text', ''))
if 'alert_text_args' in self.rule:
alert_text_args = self.rule.get('alert_text_args')
alert_text_values = [lookup_es_key(self.match, arg) for arg in alert_text_args]
# Support referencing other top-level rule properties
# This technically may not work if there is a top-level rule property with the same name
# as an es result key, since it would have been matched in the lookup_es_key call above
for i in xrange(len(alert_text_values)):
if alert_text_values[i] is None:
alert_value = self.rule.get(alert_text_args[i])
if alert_value:
alert_text_values[i] = alert_value
alert_text_values = [missing if val is None else val for val in alert_text_values]
alert_text = alert_text.format(*alert_text_values)
elif 'alert_text_kw' in self.rule:
kw = {}
for name, kw_name in self.rule.get('alert_text_kw').items():
val = lookup_es_key(self.match, name)
# Support referencing other top-level rule properties
# This technically may not work if there is a top-level rule property with the same name
# as an es result key, since it would have been matched in the lookup_es_key call above
if val is None:
val = self.rule.get(name)
kw[kw_name] = missing if val is None else val
alert_text = alert_text.format(**kw)
self.text += alert_text
def _add_rule_text(self):
self.text += self.rule['type'].get_match_str(self.match)
def _add_top_counts(self):
for key, counts in self.match.items():
if key.startswith('top_events_'):
self.text += '%s:\n' % (key[11:])
top_events = counts.items()
if not top_events:
self.text += 'No events found.\n'
else:
top_events.sort(key=lambda x: x[1], reverse=True)
for term, count in top_events:
self.text += '%s: %s\n' % (term, count)
self.text += '\n'
def _add_match_items(self):
match_items = self.match.items()
match_items.sort(key=lambda x: x[0])
for key, value in match_items:
if key.startswith('top_events_'):
continue
value_str = unicode(value)
value_str = value_str.replace('\\n', '\n')
if type(value) in [list, dict]:
try:
value_str = self._pretty_print_as_json(value)
except TypeError:
# Non serializable object, fallback to str
pass
self.text += '%s: %s\n' % (key, value_str)
def _pretty_print_as_json(self, blob):
try:
return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, ensure_ascii=False)
except UnicodeDecodeError:
# This blob contains non-unicode, so lets pretend it's Latin-1 to show something
return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, encoding='Latin-1', ensure_ascii=False)
def __str__(self):
self.text = ''
if 'alert_text' not in self.rule:
self.text += self.rule['name'] + '\n\n'
self._add_custom_alert_text()
self._ensure_new_line()
if self.rule.get('alert_text_type') != 'alert_text_only':
self._add_rule_text()
self._ensure_new_line()
if self.rule.get('top_count_keys'):
self._add_top_counts()
if self.rule.get('alert_text_type') != 'exclude_fields':
self._add_match_items()
return self.text
class JiraFormattedMatchString(BasicMatchString):
def _add_match_items(self):
match_items = dict([(x, y) for x, y in self.match.items() if not x.startswith('top_events_')])
json_blob = self._pretty_print_as_json(match_items)
preformatted_text = u'{{code:json}}{0}{{code}}'.format(json_blob)
self.text += preformatted_text
class Alerter(object):
""" Base class for types of alerts.
:param rule: The rule configuration.
"""
required_options = frozenset([])
def __init__(self, rule):
self.rule = rule
# pipeline object is created by ElastAlerter.send_alert()
# and attached to each alerters used by a rule before calling alert()
self.pipeline = None
self.resolve_rule_references(self.rule)
def resolve_rule_references(self, root):
# Support referencing other top-level rule properties to avoid redundant copy/paste
if type(root) == list:
# Make a copy since we may be modifying the contents of the structure we're walking
for i, item in enumerate(copy.copy(root)):
if type(item) == dict or type(item) == list:
self.resolve_rule_references(root[i])
else:
root[i] = self.resolve_rule_reference(item)
elif type(root) == dict:
# Make a copy since we may be modifying the contents of the structure we're walking
for key, value in root.copy().iteritems():
if type(value) == dict or type(value) == list:
self.resolve_rule_references(root[key])
else:
root[key] = self.resolve_rule_reference(value)
def resolve_rule_reference(self, value):
strValue = unicode(value)
if strValue.startswith('$') and strValue.endswith('$') and strValue[1:-1] in self.rule:
if type(value) == int:
return int(self.rule[strValue[1:-1]])
else:
return self.rule[strValue[1:-1]]
else:
return value
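# Example: given a rule like {'name': 'foo', 'command': ['echo', '$name$']},
# resolve_rule_references rewrites the command list in place to ['echo', 'foo'].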
def alert(self, match):
""" Send an alert. Match is a dictionary of information about the alert.
:param match: A dictionary of relevant information to the alert.
"""
raise NotImplementedError()
def get_info(self):
""" Returns a dictionary of data related to this alert. At minimum, this should contain
a field type corresponding to the type of Alerter. """
return {'type': 'Unknown'}
def create_title(self, matches):
""" Creates custom alert title to be used, e.g. as an e-mail subject or JIRA issue summary.
:param matches: A list of dictionaries of relevant information to the alert.
"""
if 'alert_subject' in self.rule:
return self.create_custom_title(matches)
return self.create_default_title(matches)
def create_custom_title(self, matches):
alert_subject = unicode(self.rule['alert_subject'])
if 'alert_subject_args' in self.rule:
alert_subject_args = self.rule['alert_subject_args']
alert_subject_values = [lookup_es_key(matches[0], arg) for arg in alert_subject_args]
# Support referencing other top-level rule properties
# This technically may not work if there is a top-level rule property with the same name
# as an es result key, since it would have been matched in the lookup_es_key call above
for i in xrange(len(alert_subject_values)):
if alert_subject_values[i] is None:
alert_value = self.rule.get(alert_subject_args[i])
if alert_value:
alert_subject_values[i] = alert_value
alert_subject_values = ['<MISSING VALUE>' if val is None else val for val in alert_subject_values]
return alert_subject.format(*alert_subject_values)
return alert_subject
def create_alert_body(self, matches):
body = self.get_aggregation_summary_text(matches)
for match in matches:
body += unicode(BasicMatchString(self.rule, match))
# Separate text of aggregated alerts with dashes
if len(matches) > 1:
body += '\n----------------------------------------\n'
return body
def get_aggregation_summary_text(self, matches):
text = ''
if 'aggregation' in self.rule and 'summary_table_fields' in self.rule:
summary_table_fields = self.rule['summary_table_fields']
if not isinstance(summary_table_fields, list):
summary_table_fields = [summary_table_fields]
# Include a count aggregation so that we can see at a glance how many of each aggregation_key were encountered
summary_table_fields_with_count = summary_table_fields + ['count']
text += "Aggregation resulted in the following data for summary_table_fields ==> {0}:\n\n".format(
summary_table_fields_with_count
)
text_table = Texttable()
text_table.header(summary_table_fields_with_count)
match_aggregation = {}
# Maintain an aggregate count for each unique key encountered in the aggregation period
for match in matches:
key_tuple = tuple([unicode(lookup_es_key(match, key)) for key in summary_table_fields])
if key_tuple not in match_aggregation:
match_aggregation[key_tuple] = 1
else:
match_aggregation[key_tuple] = match_aggregation[key_tuple] + 1
for keys, count in match_aggregation.iteritems():
text_table.add_row([key for key in keys] + [count])
text += text_table.draw() + '\n\n'
return unicode(text)
def create_default_title(self, matches):
return self.rule['name']
def get_account(self, account_file):
""" Gets the username and password from an account file.
:param account_file: Name of the file which contains user and password information.
"""
account_conf = yaml_loader(account_file)
if 'user' not in account_conf or 'password' not in account_conf:
raise EAException('Account file must have user and password fields')
self.user = account_conf['user']
self.password = account_conf['password']
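# Example account file (YAML), matching the two fields required above:
#   user: someuser
#   password: somepassword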
class StompAlerter(Alerter):
""" The stomp alerter publishes alerts via stomp to a broker. """
required_options = frozenset(['stomp_hostname', 'stomp_hostport', 'stomp_login', 'stomp_password'])
def alert(self, matches):
alerts = []
qk = self.rule.get('query_key', None)
fullmessage = {}
for match in matches:
if qk in match:
elastalert_logger.info(
'Alert for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field'])))
alerts.append(
'1)Alert for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field']))
)
fullmessage['match'] = match[qk]
else:
elastalert_logger.info('Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field'])))
alerts.append(
'2)Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))
)
fullmessage['match'] = lookup_es_key(match, self.rule['timestamp_field'])
elastalert_logger.info(unicode(BasicMatchString(self.rule, match)))
fullmessage['alerts'] = alerts
fullmessage['rule'] = self.rule['name']
fullmessage['matching'] = unicode(BasicMatchString(self.rule, match))
fullmessage['alertDate'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
fullmessage['body'] = self.create_alert_body(matches)
self.stomp_hostname = self.rule.get('stomp_hostname', 'localhost')
self.stomp_hostport = self.rule.get('stomp_hostport', '61613')
self.stomp_login = self.rule.get('stomp_login', 'admin')
self.stomp_password = self.rule.get('stomp_password', 'admin')
self.stomp_destination = self.rule.get('stomp_destination', '/queue/ALERT')
conn = stomp.Connection([(self.stomp_hostname, self.stomp_hostport)])
conn.start()
conn.connect(self.stomp_login, self.stomp_password)
conn.send(self.stomp_destination, json.dumps(fullmessage))
conn.disconnect()
def get_info(self):
return {'type': 'stomp'}
class DebugAlerter(Alerter):
""" The debug alerter uses a Python logger (by default, alerting to terminal). """
def alert(self, matches):
qk = self.rule.get('query_key', None)
for match in matches:
if qk in match:
elastalert_logger.info(
'Alert for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field'])))
else:
elastalert_logger.info('Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field'])))
elastalert_logger.info(unicode(BasicMatchString(self.rule, match)))
def get_info(self):
return {'type': 'debug'}
class EmailAlerter(Alerter):
""" Sends an email alert """
required_options = frozenset(['email'])
def __init__(self, *args):
super(EmailAlerter, self).__init__(*args)
self.smtp_host = self.rule.get('smtp_host', 'localhost')
self.smtp_ssl = self.rule.get('smtp_ssl', False)
self.from_addr = self.rule.get('from_addr', 'ElastAlert')
self.smtp_port = self.rule.get('smtp_port')
if self.rule.get('smtp_auth_file'):
self.get_account(self.rule['smtp_auth_file'])
self.smtp_key_file = self.rule.get('smtp_key_file')
self.smtp_cert_file = self.rule.get('smtp_cert_file')
# Convert email to a list if it isn't already
if isinstance(self.rule['email'], basestring):
self.rule['email'] = [self.rule['email']]
# If there is a cc then also convert it a list if it isn't
cc = self.rule.get('cc')
if cc and isinstance(cc, basestring):
self.rule['cc'] = [self.rule['cc']]
# If there is a bcc then also convert it to a list if it isn't
bcc = self.rule.get('bcc')
if bcc and isinstance(bcc, basestring):
self.rule['bcc'] = [self.rule['bcc']]
add_suffix = self.rule.get('email_add_domain')
if add_suffix and not add_suffix.startswith('@'):
self.rule['email_add_domain'] = '@' + add_suffix
def alert(self, matches):
body = self.create_alert_body(matches)
# Add JIRA ticket if it exists
if self.pipeline is not None and 'jira_ticket' in self.pipeline:
url = '%s/browse/%s' % (self.pipeline['jira_server'], self.pipeline['jira_ticket'])
body += '\nJIRA ticket: %s' % (url)
to_addr = self.rule['email']
if 'email_from_field' in self.rule:
recipient = lookup_es_key(matches[0], self.rule['email_from_field'])
if isinstance(recipient, basestring):
if '@' in recipient:
to_addr = [recipient]
elif 'email_add_domain' in self.rule:
to_addr = [recipient + self.rule['email_add_domain']]
elif isinstance(recipient, list):
to_addr = recipient
if 'email_add_domain' in self.rule:
to_addr = [name + self.rule['email_add_domain'] for name in to_addr]
email_msg = MIMEText(body.encode('UTF-8'), _charset='UTF-8')
email_msg['Subject'] = self.create_title(matches)
email_msg['To'] = ', '.join(to_addr)
email_msg['From'] = self.from_addr
email_msg['Reply-To'] = self.rule.get('email_reply_to', email_msg['To'])
email_msg['Date'] = formatdate()
if self.rule.get('cc'):
email_msg['CC'] = ','.join(self.rule['cc'])
to_addr = to_addr + self.rule['cc']
if self.rule.get('bcc'):
to_addr = to_addr + self.rule['bcc']
try:
if self.smtp_ssl:
if self.smtp_port:
self.smtp = SMTP_SSL(self.smtp_host, self.smtp_port, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file)
else:
self.smtp = SMTP_SSL(self.smtp_host, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file)
else:
if self.smtp_port:
self.smtp = SMTP(self.smtp_host, self.smtp_port)
else:
self.smtp = SMTP(self.smtp_host)
self.smtp.ehlo()
if self.smtp.has_extn('STARTTLS'):
self.smtp.starttls(keyfile=self.smtp_key_file, certfile=self.smtp_cert_file)
if 'smtp_auth_file' in self.rule:
self.smtp.login(self.user, self.password)
except (SMTPException, error) as e:
raise EAException("Error connecting to SMTP host: %s" % (e))
except SMTPAuthenticationError as e:
raise EAException("SMTP username/password rejected: %s" % (e))
self.smtp.sendmail(self.from_addr, to_addr, email_msg.as_string())
self.smtp.close()
elastalert_logger.info("Sent email to %s" % (to_addr))
def create_default_title(self, matches):
subject = 'ElastAlert: %s' % (self.rule['name'])
# If the rule has a query_key, add that value plus timestamp to subject
if 'query_key' in self.rule:
qk = matches[0].get(self.rule['query_key'])
if qk:
subject += ' - %s' % (qk)
return subject
def get_info(self):
return {'type': 'email',
'recipients': self.rule['email']}
class JiraAlerter(Alerter):
""" Creates a Jira ticket for each alert """
required_options = frozenset(['jira_server', 'jira_account_file', 'jira_project', 'jira_issuetype'])
# Maintain a static set of built-in fields that we explicitly know how to set
# For anything else, we will do best-effort and try to set a string value
known_field_list = [
'jira_account_file',
'jira_assignee',
'jira_bump_after_inactivity',
'jira_bump_in_statuses',
'jira_bump_not_in_statuses',
'jira_bump_tickets',
'jira_component',
'jira_components',
'jira_description',
'jira_ignore_in_title',
'jira_issuetype',
'jira_label',
'jira_labels',
'jira_max_age',
'jira_priority',
'jira_project',
'jira_server',
'jira_watchers',
]
# Some built-in jira types that can be used as custom fields require special handling
# Here is a sample of one of them:
# {"id":"customfield_12807","name":"My Custom Field","custom":true,"orderable":true,"navigable":true,"searchable":true,
# "clauseNames":["cf[12807]","My Custom Field"],"schema":{"type":"array","items":"string",
# "custom":"com.atlassian.jira.plugin.system.customfieldtypes:multiselect","customId":12807}}
# There are likely others that will need to be updated on a case-by-case basis
custom_string_types_with_special_handling = [
'com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes',
'com.atlassian.jira.plugin.system.customfieldtypes:multiselect',
'com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons',
]
def __init__(self, rule):
super(JiraAlerter, self).__init__(rule)
self.server = self.rule['jira_server']
self.get_account(self.rule['jira_account_file'])
self.project = self.rule['jira_project']
self.issue_type = self.rule['jira_issuetype']
# We used to support only a single component. This allows us to maintain backwards compatibility
# while also giving the user-facing API a more representative name
self.components = self.rule.get('jira_components', self.rule.get('jira_component'))
# We used to support only a single label. This allows us to maintain backwards compatibility
# while also giving the user-facing API a more representative name
self.labels = self.rule.get('jira_labels', self.rule.get('jira_label'))
self.description = self.rule.get('jira_description', '')
self.assignee = self.rule.get('jira_assignee')
self.max_age = self.rule.get('jira_max_age', 30)
self.priority = self.rule.get('jira_priority')
self.bump_tickets = self.rule.get('jira_bump_tickets', False)
self.bump_not_in_statuses = self.rule.get('jira_bump_not_in_statuses')
self.bump_in_statuses = self.rule.get('jira_bump_in_statuses')
self.bump_after_inactivity = self.rule.get('jira_bump_after_inactivity', self.max_age)
self.watchers = self.rule.get('jira_watchers')
if self.bump_in_statuses and self.bump_not_in_statuses:
msg = 'Both jira_bump_in_statuses (%s) and jira_bump_not_in_statuses (%s) are set.' % \
(','.join(self.bump_in_statuses), ','.join(self.bump_not_in_statuses))
intersection = list(set(self.bump_in_statuses) & set(self.bump_not_in_statuses))
if intersection:
msg = '%s Both have common statuses of (%s). As such, no tickets will ever be found.' % (
msg, ','.join(intersection))
msg += ' This should be simplified to use only one or the other.'
logging.warning(msg)
self.jira_args = {'project': {'key': self.project},
'issuetype': {'name': self.issue_type}}
if self.components:
# Support single component or list
if type(self.components) != list:
self.jira_args['components'] = [{'name': self.components}]
else:
self.jira_args['components'] = [{'name': component} for component in self.components]
if self.labels:
# Support single label or list
if type(self.labels) != list:
self.labels = [self.labels]
self.jira_args['labels'] = self.labels
if self.watchers:
# Support single watcher or list
if type(self.watchers) != list:
self.watchers = [self.watchers]
if self.assignee:
self.jira_args['assignee'] = {'name': self.assignee}
try:
self.client = JIRA(self.server, basic_auth=(self.user, self.password))
self.get_priorities()
self.get_arbitrary_fields()
except JIRAError as e:
# JIRAError may contain HTML, pass along only first 1024 chars
raise EAException("Error connecting to JIRA: %s" % (str(e)[:1024]))
try:
if self.priority is not None:
self.jira_args['priority'] = {'id': self.priority_ids[self.priority]}
except KeyError:
logging.error("Priority %s not found. Valid priorities are %s" % (self.priority, self.priority_ids.keys()))
def get_arbitrary_fields(self):
# This API returns metadata about all the fields defined on the jira server (built-ins and custom ones)
fields = self.client.fields()
for jira_field, value in self.rule.iteritems():
# If we find a field that is not covered by the set that we are aware of, it means it is either:
# 1. A built-in supported field in JIRA that we don't have on our radar
# 2. A custom field that a JIRA admin has configured
if jira_field.startswith('jira_') and jira_field not in self.known_field_list:
# Remove the jira_ part. Convert underscores to spaces
normalized_jira_field = jira_field[5:].replace('_', ' ').lower()
# All jira fields should be found in the 'id' or the 'name' field. Therefore, try both just in case
for identifier in ['name', 'id']:
field = next((f for f in fields if normalized_jira_field == f[identifier].replace('_', ' ').lower()), None)
if field:
break
if not field:
# Log a warning to ElastAlert saying that we couldn't find that type?
# OR raise and fail to load the alert entirely? Probably the latter...
raise Exception("Could not find a definition for the jira field '{0}'".format(normalized_jira_field))
arg_name = field['id']
# Check the schema information to decide how to set the value correctly
# If the schema information is not available, raise an exception since we don't know how to set it
# Note this is only the case for two built-in types, id: issuekey and id: thumbnail
if 'schema' not in field or 'type' not in field['schema']:
raise Exception("Could not determine schema information for the jira field '{0}'".format(normalized_jira_field))
arg_type = field['schema']['type']
# Handle arrays of simple types like strings or numbers
if arg_type == 'array':
# As a convenience, support the scenario wherein the user only provides
# a single value for a multi-value field e.g. jira_labels: Only_One_Label
if type(value) != list:
value = [value]
array_items = field['schema']['items']
# Simple string types
if array_items in ['string', 'date', 'datetime']:
# Special case for multi-select custom types (the JIRA metadata says that these are strings, but
# in reality, they are required to be provided as an object).
if 'custom' in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling:
self.jira_args[arg_name] = [{'value': v} for v in value]
else:
self.jira_args[arg_name] = value
elif array_items == 'number':
self.jira_args[arg_name] = [int(v) for v in value]
# Also attempt to handle arrays of complex types that have to be passed as objects with an identifier 'key'
elif array_items == 'option':
self.jira_args[arg_name] = [{'value': v} for v in value]
else:
# Try setting it as an object, using 'name' as the key
# This may not work, as the key might actually be 'key', 'id', 'value', or something else
# If it works, great! If not, it will manifest itself as an API error that will bubble up
self.jira_args[arg_name] = [{'name': v} for v in value]
# Handle non-array types
else:
# Simple string types
if arg_type in ['string', 'date', 'datetime']:
# Special case for custom types (the JIRA metadata says that these are strings, but
# in reality, they are required to be provided as an object).
if 'custom' in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling:
self.jira_args[arg_name] = {'value': value}
else:
self.jira_args[arg_name] = value
# Number type
elif arg_type == 'number':
self.jira_args[arg_name] = int(value)
elif arg_type == 'option':
self.jira_args[arg_name] = {'value': value}
# Complex type
else:
self.jira_args[arg_name] = {'name': value}
def get_priorities(self):
""" Creates a mapping of priority index to id. """
priorities = self.client.priorities()
self.priority_ids = {}
for x in range(len(priorities)):
self.priority_ids[x] = priorities[x].id
def set_assignee(self, assignee):
self.assignee = assignee
if assignee:
self.jira_args['assignee'] = {'name': assignee}
elif 'assignee' in self.jira_args:
self.jira_args.pop('assignee')
def find_existing_ticket(self, matches):
# Default title, get stripped search version
if 'alert_subject' not in self.rule:
title = self.create_default_title(matches, True)
else:
title = self.create_title(matches)
if 'jira_ignore_in_title' in self.rule:
title = title.replace(matches[0].get(self.rule['jira_ignore_in_title'], ''), '')
# This is necessary for search to work. Other special characters and dashes
# directly adjacent to words appear to be ok
title = title.replace(' - ', ' ')
title = title.replace('\\', '\\\\')
date = (datetime.datetime.now() - datetime.timedelta(days=self.max_age)).strftime('%Y-%m-%d')
jql = 'project=%s AND summary~"%s" and created >= "%s"' % (self.project, title, date)
if self.bump_in_statuses:
jql = '%s and status in (%s)' % (jql, ','.join(self.bump_in_statuses))
if self.bump_not_in_statuses:
jql = '%s and status not in (%s)' % (jql, ','.join(self.bump_not_in_statuses))
try:
issues = self.client.search_issues(jql)
except JIRAError as e:
logging.exception("Error while searching for JIRA ticket using jql '%s': %s" % (jql, e))
return None
if len(issues):
return issues[0]
def comment_on_ticket(self, ticket, match):
text = unicode(JiraFormattedMatchString(self.rule, match))
timestamp = pretty_ts(lookup_es_key(match, self.rule['timestamp_field']))
comment = "This alert was triggered again at %s\n%s" % (timestamp, text)
self.client.add_comment(ticket, comment)
def alert(self, matches):
title = self.create_title(matches)
if self.bump_tickets:
ticket = self.find_existing_ticket(matches)
if ticket:
inactivity_datetime = ts_now() - datetime.timedelta(days=self.bump_after_inactivity)
if ts_to_dt(ticket.fields.updated) >= inactivity_datetime:
if self.pipeline is not None:
self.pipeline['jira_ticket'] = None
self.pipeline['jira_server'] = self.server
return None
elastalert_logger.info('Commenting on existing ticket %s' % (ticket.key))
for match in matches:
try:
self.comment_on_ticket(ticket, match)
except JIRAError as e:
logging.exception("Error while commenting on ticket %s: %s" % (ticket, e))
if self.pipeline is not None:
self.pipeline['jira_ticket'] = ticket
self.pipeline['jira_server'] = self.server
return None
self.jira_args['summary'] = title
self.jira_args['description'] = self.create_alert_body(matches)
try:
self.issue = self.client.create_issue(**self.jira_args)
# You can not add watchers on initial creation. Only as a follow-up action
if self.watchers:
for watcher in self.watchers:
try:
self.client.add_watcher(self.issue.key, watcher)
except Exception as ex:
# Re-raise the exception, preserve the stack-trace, and give some
# context as to which watcher failed to be added
raise Exception(
"Exception encountered when trying to add '{0}' as a watcher. Does the user exist?\n{1}" .format(
watcher,
ex
)), None, sys.exc_info()[2]
except JIRAError as e:
raise EAException("Error creating JIRA ticket using jira_args (%s): %s" % (self.jira_args, e))
elastalert_logger.info("Opened Jira ticket: %s" % (self.issue))
if self.pipeline is not None:
self.pipeline['jira_ticket'] = self.issue
self.pipeline['jira_server'] = self.server
def create_alert_body(self, matches):
body = self.description + '\n'
body += self.get_aggregation_summary_text(matches)
for match in matches:
body += unicode(JiraFormattedMatchString(self.rule, match))
if len(matches) > 1:
body += '\n----------------------------------------\n'
return body
def get_aggregation_summary_text(self, matches):
text = super(JiraAlerter, self).get_aggregation_summary_text(matches)
if text:
text = u'{{noformat}}{0}{{noformat}}'.format(text)
return text
def create_default_title(self, matches, for_search=False):
# If there is a query_key, use that in the title
if 'query_key' in self.rule and lookup_es_key(matches[0], self.rule['query_key']):
title = 'ElastAlert: %s matched %s' % (lookup_es_key(matches[0], self.rule['query_key']), self.rule['name'])
else:
title = 'ElastAlert: %s' % (self.rule['name'])
if for_search:
return title
title += ' - %s' % (pretty_ts(matches[0][self.rule['timestamp_field']], self.rule.get('use_local_time')))
# Add count for spikes
count = matches[0].get('spike_count')
if count:
title += ' - %s+ events' % (count)
return title
def get_info(self):
return {'type': 'jira'}
class CommandAlerter(Alerter):
required_options = set(['command'])
def __init__(self, *args):
super(CommandAlerter, self).__init__(*args)
self.last_command = []
self.shell = False
if isinstance(self.rule['command'], basestring):
self.shell = True
if '%' in self.rule['command']:
logging.warning('Warning! You could be vulnerable to shell injection!')
self.rule['command'] = [self.rule['command']]
self.new_style_string_format = False
if 'new_style_string_format' in self.rule and self.rule['new_style_string_format']:
self.new_style_string_format = True
def alert(self, matches):
# Format the command and arguments
try:
if self.new_style_string_format:
command = [command_arg.format(match=matches[0]) for command_arg in self.rule['command']]
else:
command = [command_arg % matches[0] for command_arg in self.rule['command']]
self.last_command = command
except KeyError as e:
raise EAException("Error formatting command: %s" % (e))
# Run command and pipe data
try:
subp = subprocess.Popen(command, stdin=subprocess.PIPE, shell=self.shell)
if self.rule.get('pipe_match_json'):
match_json = json.dumps(matches, cls=DateTimeEncoder) + '\n'
stdout, stderr = subp.communicate(input=match_json)
if self.rule.get("fail_on_non_zero_exit", False) and subp.wait():
raise EAException("Non-zero exit code while running command %s" % (' '.join(command)))
except OSError as e:
raise EAException("Error while running command %s: %s" % (' '.join(command), e))
def get_info(self):
return {'type': 'command',
'command': ' '.join(self.last_command)}
class SnsAlerter(Alerter):
""" Send alert using AWS SNS service """
required_options = frozenset(['sns_topic_arn'])
def __init__(self, *args):
super(SnsAlerter, self).__init__(*args)
self.sns_topic_arn = self.rule.get('sns_topic_arn', '')
self.aws_access_key_id = self.rule.get('aws_access_key_id')
self.aws_secret_access_key = self.rule.get('aws_secret_access_key')
self.aws_region = self.rule.get('aws_region', 'us-east-1')
self.profile = self.rule.get('boto_profile', None)  # Deprecated
self.profile = self.rule.get('aws_profile', self.profile)
def create_default_title(self, matches):
subject = 'ElastAlert: %s' % (self.rule['name'])
return subject
def alert(self, matches):
body = self.create_alert_body(matches)
session = boto3.Session(
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
region_name=self.aws_region,
profile_name=self.profile
)
sns_client = session.client('sns')
sns_client.publish(
TopicArn=self.sns_topic_arn,
Message=body,
Subject=self.create_title(matches)
)
elastalert_logger.info("Sent sns notification to %s" % (self.sns_topic_arn))
class HipChatAlerter(Alerter):
""" Creates a HipChat room notification for each alert """
required_options = frozenset(['hipchat_auth_token', 'hipchat_room_id'])
def __init__(self, rule):
super(HipChatAlerter, self).__init__(rule)
self.hipchat_msg_color = self.rule.get('hipchat_msg_color', 'red')
self.hipchat_message_format = self.rule.get('hipchat_message_format', 'html')
self.hipchat_auth_token = self.rule['hipchat_auth_token']
self.hipchat_room_id = self.rule['hipchat_room_id']
self.hipchat_domain = self.rule.get('hipchat_domain', 'api.hipchat.com')
self.hipchat_ignore_ssl_errors = self.rule.get('hipchat_ignore_ssl_errors', False)
self.hipchat_notify = self.rule.get('hipchat_notify', True)
self.hipchat_from = self.rule.get('hipchat_from', '')
self.url = 'https://%s/v2/room/%s/notification?auth_token=%s' % (
self.hipchat_domain, self.hipchat_room_id, self.hipchat_auth_token)
self.hipchat_proxy = self.rule.get('hipchat_proxy', None)
def alert(self, matches):
body = self.create_alert_body(matches)
# HipChat sends 400 bad request on messages longer than 10000 characters
if (len(body) > 9999):
body = body[:9980] + '..(truncated)'
# Use appropriate line ending for text/html
if self.hipchat_message_format == 'html':
body = body.replace('\n', '<br />')
# Post to HipChat
headers = {'content-type': 'application/json'}
# set https proxy, if it was provided
proxies = {'https': self.hipchat_proxy} if self.hipchat_proxy else None
payload = {
'color': self.hipchat_msg_color,
'message': body,
'message_format': self.hipchat_message_format,
'notify': self.hipchat_notify,
'from': self.hipchat_from
}
try:
if self.hipchat_ignore_ssl_errors:
requests.packages.urllib3.disable_warnings()
response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers,
verify=not self.hipchat_ignore_ssl_errors,
proxies=proxies)
warnings.resetwarnings()
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to HipChat: %s" % e)
elastalert_logger.info("Alert sent to HipChat room %s" % self.hipchat_room_id)
def get_info(self):
return {'type': 'hipchat',
'hipchat_room_id': self.hipchat_room_id}
class MsTeamsAlerter(Alerter):
""" Creates a Microsoft Teams Conversation Message for each alert """
required_options = frozenset(['ms_teams_webhook_url', 'ms_teams_alert_summary'])
def __init__(self, rule):
super(MsTeamsAlerter, self).__init__(rule)
self.ms_teams_webhook_url = self.rule['ms_teams_webhook_url']
if isinstance(self.ms_teams_webhook_url, basestring):
self.ms_teams_webhook_url = [self.ms_teams_webhook_url]
self.ms_teams_proxy = self.rule.get('ms_teams_proxy', None)
self.ms_teams_alert_summary = self.rule.get('ms_teams_alert_summary', 'ElastAlert Message')
self.ms_teams_alert_fixed_width = self.rule.get('ms_teams_alert_fixed_width', False)
self.ms_teams_theme_color = self.rule.get('ms_teams_theme_color', '')
def format_body(self, body):
body = body.encode('UTF-8')
if self.ms_teams_alert_fixed_width:
body = body.replace('`', "'")
body = "```{0}```".format('```\n\n```'.join(x for x in body.split('\n'))).replace('\n``````', '')
return body
def alert(self, matches):
body = self.create_alert_body(matches)
body = self.format_body(body)
# post to Teams
headers = {'content-type': 'application/json'}
# set https proxy, if it was provided
proxies = {'https': self.ms_teams_proxy} if self.ms_teams_proxy else None
payload = {
'@type': 'MessageCard',
'@context': 'http://schema.org/extensions',
'summary': self.ms_teams_alert_summary,
'title': self.create_title(matches),
'text': body
}
if self.ms_teams_theme_color != '':
payload['themeColor'] = self.ms_teams_theme_color
for url in self.ms_teams_webhook_url:
try:
response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to ms teams: %s" % e)
elastalert_logger.info("Alert sent to MS Teams")
def get_info(self):
return {'type': 'ms_teams',
'ms_teams_webhook_url': self.ms_teams_webhook_url}
class SlackAlerter(Alerter):
""" Creates a Slack room message for each alert """
required_options = frozenset(['slack_webhook_url'])
def __init__(self, rule):
super(SlackAlerter, self).__init__(rule)
self.slack_webhook_url = self.rule['slack_webhook_url']
if isinstance(self.slack_webhook_url, basestring):
self.slack_webhook_url = [self.slack_webhook_url]
self.slack_proxy = self.rule.get('slack_proxy', None)
self.slack_username_override = self.rule.get('slack_username_override', 'elastalert')
self.slack_channel_override = self.rule.get('slack_channel_override', '')
self.slack_emoji_override = self.rule.get('slack_emoji_override', ':ghost:')
self.slack_icon_url_override = self.rule.get('slack_icon_url_override', '')
self.slack_msg_color = self.rule.get('slack_msg_color', 'danger')
self.slack_parse_override = self.rule.get('slack_parse_override', 'none')
self.slack_text_string = self.rule.get('slack_text_string', '')
def format_body(self, body):
# https://api.slack.com/docs/formatting
body = body.encode('UTF-8')
body = body.replace('&', '&amp;')
body = body.replace('<', '&lt;')
body = body.replace('>', '&gt;')
return body
def alert(self, matches):
body = self.create_alert_body(matches)
body = self.format_body(body)
# post to slack
headers = {'content-type': 'application/json'}
# set https proxy, if it was provided
proxies = {'https': self.slack_proxy} if self.slack_proxy else None
payload = {
'username': self.slack_username_override,
'channel': self.slack_channel_override,
'parse': self.slack_parse_override,
'text': self.slack_text_string,
'attachments': [
{
'color': self.slack_msg_color,
'title': self.create_title(matches),
'text': body,
'mrkdwn_in': ['text', 'pretext'],
'fields': []
}
]
}
if self.slack_icon_url_override != '':
payload['icon_url'] = self.slack_icon_url_override
else:
payload['icon_emoji'] = self.slack_emoji_override
for url in self.slack_webhook_url:
try:
response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to slack: %s" % e)
elastalert_logger.info("Alert sent to Slack")
def get_info(self):
return {'type': 'slack',
'slack_username_override': self.slack_username_override,
'slack_webhook_url': self.slack_webhook_url}
class PagerDutyAlerter(Alerter):
""" Create an incident on PagerDuty for each alert """
required_options = frozenset(['pagerduty_service_key', 'pagerduty_client_name'])
def __init__(self, rule):
super(PagerDutyAlerter, self).__init__(rule)
self.pagerduty_service_key = self.rule['pagerduty_service_key']
self.pagerduty_client_name = self.rule['pagerduty_client_name']
self.pagerduty_incident_key = self.rule.get('pagerduty_incident_key', '')
self.pagerduty_incident_key_args = self.rule.get('pagerduty_incident_key_args', None)
self.pagerduty_proxy = self.rule.get('pagerduty_proxy', None)
self.url = 'https://events.pagerduty.com/generic/2010-04-15/create_event.json'
def alert(self, matches):
body = self.create_alert_body(matches)
# post to pagerduty
headers = {'content-type': 'application/json'}
payload = {
'service_key': self.pagerduty_service_key,
'description': self.create_title(matches),
'event_type': 'trigger',
'incident_key': self.get_incident_key(matches),
'client': self.pagerduty_client_name,
'details': {
"information": body.encode('UTF-8'),
},
}
# set https proxy, if it was provided
proxies = {'https': self.pagerduty_proxy} if self.pagerduty_proxy else None
try:
response = requests.post(
self.url,
data=json.dumps(payload, cls=DateTimeEncoder, ensure_ascii=False),
headers=headers,
proxies=proxies
)
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to pagerduty: %s" % e)
elastalert_logger.info("Trigger sent to PagerDuty")
def get_incident_key(self, matches):
if self.pagerduty_incident_key_args:
incident_key_values = [lookup_es_key(matches[0], arg) for arg in self.pagerduty_incident_key_args]
# Populate values with rule level properties too
for i in range(len(incident_key_values)):
if incident_key_values[i] is None:
key_value = self.rule.get(self.pagerduty_incident_key_args[i])
if key_value:
incident_key_values[i] = key_value
incident_key_values = ['<MISSING VALUE>' if val is None else val for val in incident_key_values]
return self.pagerduty_incident_key.format(*incident_key_values)
else:
return self.pagerduty_incident_key
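# Example (hypothetical values): with pagerduty_incident_key 'outage {0}' and
# pagerduty_incident_key_args ['host'], a match {'host': 'web-1'} yields the incident
# key 'outage web-1'; failed lookups fall back to rule properties, then to '<MISSING VALUE>'.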
def get_info(self):
return {'type': 'pagerduty',
'pagerduty_client_name': self.pagerduty_client_name}
class ExotelAlerter(Alerter):
required_options = frozenset(['exotel_account_sid', 'exotel_auth_token', 'exotel_to_number', 'exotel_from_number'])
def __init__(self, rule):
super(ExotelAlerter, self).__init__(rule)
self.exotel_account_sid = self.rule['exotel_account_sid']
self.exotel_auth_token = self.rule['exotel_auth_token']
self.exotel_to_number = self.rule['exotel_to_number']
self.exotel_from_number = self.rule['exotel_from_number']
self.sms_body = self.rule.get('exotel_message_body', '')
def alert(self, matches):
client = Exotel(self.exotel_account_sid, self.exotel_auth_token)
try:
message_body = self.rule['name'] + self.sms_body
response = client.sms(self.rule['exotel_from_number'], self.rule['exotel_to_number'], message_body)
if response != 200:
raise EAException("Error posting to Exotel, response code is %s" % response)
except:
raise EAException("Error posting to Exotel"), None, sys.exc_info()[2]
elastalert_logger.info("Trigger sent to Exotel")
def get_info(self):
return {'type': 'exotel', 'exotel_account': self.exotel_account_sid}
class TwilioAlerter(Alerter):
required_options = frozenset(['twilio_account_sid', 'twilio_auth_token', 'twilio_to_number', 'twilio_from_number'])
def __init__(self, rule):
super(TwilioAlerter, self).__init__(rule)
self.twilio_account_sid = self.rule['twilio_account_sid']
self.twilio_auth_token = self.rule['twilio_auth_token']
self.twilio_to_number = self.rule['twilio_to_number']
self.twilio_from_number = self.rule['twilio_from_number']
def alert(self, matches):
client = TwilioClient(self.twilio_account_sid, self.twilio_auth_token)
try:
client.messages.create(body=self.rule['name'],
to=self.twilio_to_number,
from_=self.twilio_from_number)
except TwilioRestException as e:
raise EAException("Error posting to twilio: %s" % e)
elastalert_logger.info("Trigger sent to Twilio")
def get_info(self):
return {'type': 'twilio',
'twilio_client_name': self.twilio_from_number}
class VictorOpsAlerter(Alerter):
""" Creates a VictorOps Incident for each alert """
required_options = frozenset(['victorops_api_key', 'victorops_routing_key', 'victorops_message_type'])
def __init__(self, rule):
super(VictorOpsAlerter, self).__init__(rule)
self.victorops_api_key = self.rule['victorops_api_key']
self.victorops_routing_key = self.rule['victorops_routing_key']
self.victorops_message_type = self.rule['victorops_message_type']
self.victorops_entity_display_name = self.rule.get('victorops_entity_display_name', 'no entity display name')
self.url = 'https://alert.victorops.com/integrations/generic/20131114/alert/%s/%s' % (
self.victorops_api_key, self.victorops_routing_key)
self.victorops_proxy = self.rule.get('victorops_proxy', None)
def alert(self, matches):
body = self.create_alert_body(matches)
# post to victorops
headers = {'content-type': 'application/json'}
# set https proxy, if it was provided
proxies = {'https': self.victorops_proxy} if self.victorops_proxy else None
payload = {
"message_type": self.victorops_message_type,
"entity_display_name": self.victorops_entity_display_name,
"monitoring_tool": "ElastAlert",
"state_message": body
}
try:
response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to VictorOps: %s" % e)
elastalert_logger.info("Trigger sent to VictorOps")
def get_info(self):
return {'type': 'victorops',
'victorops_routing_key': self.victorops_routing_key}
class TelegramAlerter(Alerter):
    """ Send a Telegram message via bot api for each alert """
    required_options = frozenset(['telegram_bot_token', 'telegram_room_id'])

    def __init__(self, rule):
        super(TelegramAlerter, self).__init__(rule)
        self.telegram_bot_token = self.rule['telegram_bot_token']
        self.telegram_room_id = self.rule['telegram_room_id']
        self.telegram_api_url = self.rule.get('telegram_api_url', 'api.telegram.org')
        self.url = 'https://%s/bot%s/%s' % (self.telegram_api_url, self.telegram_bot_token, "sendMessage")
        self.telegram_proxy = self.rule.get('telegram_proxy', None)

    def alert(self, matches):
        body = u'⚠ *%s* ⚠ ```\n' % (self.create_title(matches))
        for match in matches:
            body += unicode(BasicMatchString(self.rule, match))
            # Separate text of aggregated alerts with dashes
            if len(matches) > 1:
                body += '\n----------------------------------------\n'
        body += u' ```'

        headers = {'content-type': 'application/json'}
        # set https proxy, if it was provided
        proxies = {'https': self.telegram_proxy} if self.telegram_proxy else None
        payload = {
            'chat_id': self.telegram_room_id,
            'text': body,
            'parse_mode': 'markdown',
            'disable_web_page_preview': True
        }

        try:
            response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)
            warnings.resetwarnings()
            response.raise_for_status()
        except RequestException as e:
            raise EAException("Error posting to Telegram: %s" % e)

        elastalert_logger.info(
            "Alert sent to Telegram room %s" % self.telegram_room_id)

    def get_info(self):
        return {'type': 'telegram',
                'telegram_room_id': self.telegram_room_id}
class GitterAlerter(Alerter):
    """ Creates a Gitter activity message for each alert """
    required_options = frozenset(['gitter_webhook_url'])

    def __init__(self, rule):
        super(GitterAlerter, self).__init__(rule)
        self.gitter_webhook_url = self.rule['gitter_webhook_url']
        self.gitter_proxy = self.rule.get('gitter_proxy', None)
        self.gitter_msg_level = self.rule.get('gitter_msg_level', 'error')

    def alert(self, matches):
        body = self.create_alert_body(matches)

        # post to Gitter
        headers = {'content-type': 'application/json'}
        # set https proxy, if it was provided
        proxies = {'https': self.gitter_proxy} if self.gitter_proxy else None
        payload = {
            'message': body,
            'level': self.gitter_msg_level
        }

        try:
            response = requests.post(self.gitter_webhook_url, json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)
            response.raise_for_status()
        except RequestException as e:
            raise EAException("Error posting to Gitter: %s" % e)
        elastalert_logger.info("Alert sent to Gitter")

    def get_info(self):
        return {'type': 'gitter',
                'gitter_webhook_url': self.gitter_webhook_url}
class ServiceNowAlerter(Alerter):
    """ Creates a ServiceNow alert """
    required_options = set([
        'username',
        'password',
        'servicenow_rest_url',
        'short_description',
        'comments',
        'assignment_group',
        'category',
        'subcategory',
        'cmdb_ci',
        'caller_id'
    ])

    def __init__(self, rule):
        super(ServiceNowAlerter, self).__init__(rule)
        self.servicenow_rest_url = self.rule['servicenow_rest_url']
        self.servicenow_proxy = self.rule.get('servicenow_proxy', None)

    def alert(self, matches):
        for match in matches:
            # Parse everything into description.
            description = str(BasicMatchString(self.rule, match))

        # Set proper headers
        headers = {
            "Content-Type": "application/json",
            "Accept": "application/json;charset=utf-8"
        }
        proxies = {'https': self.servicenow_proxy} if self.servicenow_proxy else None
        payload = {
            "description": description,
            "short_description": self.rule['short_description'],
            "comments": self.rule['comments'],
            "assignment_group": self.rule['assignment_group'],
            "category": self.rule['category'],
            "subcategory": self.rule['subcategory'],
            "cmdb_ci": self.rule['cmdb_ci'],
            "caller_id": self.rule["caller_id"]
        }
        try:
            response = requests.post(
                self.servicenow_rest_url,
                auth=(self.rule['username'], self.rule['password']),
                headers=headers,
                data=json.dumps(payload, cls=DateTimeEncoder),
                proxies=proxies
            )
            response.raise_for_status()
        except RequestException as e:
            raise EAException("Error posting to ServiceNow: %s" % e)
        elastalert_logger.info("Alert sent to ServiceNow")

    def get_info(self):
        return {'type': 'ServiceNow',
                'servicenow_rest_url': self.servicenow_rest_url}
class HTTPPostAlerter(Alerter):
    """ Requested elasticsearch indices are sent by HTTP POST. Encoded with JSON. """

    def __init__(self, rule):
        super(HTTPPostAlerter, self).__init__(rule)
        post_url = self.rule.get('http_post_url')
        if isinstance(post_url, basestring):
            post_url = [post_url]
        self.post_url = post_url
        self.post_proxy = self.rule.get('http_post_proxy')
        self.post_payload = self.rule.get('http_post_payload', {})
        self.post_static_payload = self.rule.get('http_post_static_payload', {})
        self.post_all_values = self.rule.get('http_post_all_values', not self.post_payload)

    def alert(self, matches):
        """ Each match will trigger a POST to the specified endpoint(s). """
        for match in matches:
            payload = match if self.post_all_values else {}
            payload.update(self.post_static_payload)
            for post_key, es_key in self.post_payload.items():
                payload[post_key] = lookup_es_key(match, es_key)
            headers = {
                "Content-Type": "application/json",
                "Accept": "application/json;charset=utf-8"
            }
            proxies = {'https': self.post_proxy} if self.post_proxy else None
            for url in self.post_url:
                try:
                    response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder),
                                             headers=headers, proxies=proxies)
                    response.raise_for_status()
                except RequestException as e:
                    raise EAException("Error posting HTTP Post alert: %s" % e)
            elastalert_logger.info("HTTP Post alert sent.")

    def get_info(self):
        return {'type': 'http_post',
                'http_post_webhook_url': self.post_url}
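All of these alerters share one delivery pattern: build a JSON payload, POST it with requests (optionally through an HTTPS proxy), and convert any RequestException into a single exception type. A minimal, self-contained sketch of that shared pattern follows; the names WebhookError and send_webhook are illustrative, not part of ElastAlert.

import json

import requests


class WebhookError(Exception):
    """Raised when the webhook endpoint rejects or drops the request."""
    pass


def send_webhook(url, payload, proxy=None):
    # Mirror the alerters above: JSON body, optional https proxy,
    # and raise_for_status() to turn HTTP errors into exceptions.
    headers = {'content-type': 'application/json'}
    proxies = {'https': proxy} if proxy else None
    try:
        response = requests.post(url, data=json.dumps(payload), headers=headers, proxies=proxies)
        response.raise_for_status()
    except requests.RequestException as e:
        raise WebhookError("Error posting webhook: %s" % e)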
| 44.289971
| 137
| 0.607814
| 7,440
| 60,943
| 4.771371
| 0.104301
| 0.05048
| 0.026959
| 0.006704
| 0.427646
| 0.314544
| 0.252373
| 0.207583
| 0.181695
| 0.173526
| 0
| 0.002861
| 0.283101
| 60,943
| 1,375
| 138
| 44.322182
| 0.809613
| 0.093514
| 0
| 0.245411
| 0
| 0.000966
| 0.15868
| 0.024326
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.011594
| 0.029952
| null | null | 0.002899
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c9e60fb8b9a1847e8db908d6cfa14b5a53e1aaf
| 623
|
py
|
Python
|
API/migrations/0005_alter_news_date_time_alter_news_headline.py
|
kgarchie/ReSTful-Django-API
|
851c76eb75747042ceac0a6c164266409ca935d4
|
[
"MIT"
] | null | null | null |
API/migrations/0005_alter_news_date_time_alter_news_headline.py
|
kgarchie/ReSTful-Django-API
|
851c76eb75747042ceac0a6c164266409ca935d4
|
[
"MIT"
] | null | null | null |
API/migrations/0005_alter_news_date_time_alter_news_headline.py
|
kgarchie/ReSTful-Django-API
|
851c76eb75747042ceac0a6c164266409ca935d4
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0.3 on 2022-03-23 14:31
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):

    dependencies = [
        ('API', '0004_alter_news_date_time_alter_news_headline'),
    ]

    operations = [
        migrations.AlterField(
            model_name='news',
            name='date_time',
            field=models.DateTimeField(default=datetime.datetime(2022, 3, 23, 17, 31, 17, 27766)),
        ),
        migrations.AlterField(
            model_name='news',
            name='headline',
            field=models.CharField(max_length=100),
        ),
    ]
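Note that the fixed default above freezes the timestamp at the moment the migration was generated. A hedged sketch of a hypothetical follow-up migration (the 0006 name and dependency are assumptions) that passes the callable django.utils.timezone.now instead, so each row gets the time of insertion:

from django.db import migrations, models
import django.utils.timezone


class Migration(migrations.Migration):

    dependencies = [
        ('API', '0005_alter_news_date_time_alter_news_headline'),
    ]

    operations = [
        migrations.AlterField(
            model_name='news',
            name='date_time',
            # A callable default is evaluated per save, not once at migrate time.
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
    ]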
| 24.92
| 98
| 0.603531
| 69
| 623
| 5.289855
| 0.594203
| 0.049315
| 0.136986
| 0.158904
| 0.20274
| 0.20274
| 0
| 0
| 0
| 0
| 0
| 0.089485
| 0.282504
| 623
| 24
| 99
| 25.958333
| 0.727069
| 0.072231
| 0
| 0.333333
| 1
| 0
| 0.126736
| 0.078125
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.277778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7ca33bba047d555eff412922059b6da8837f7980
| 270
|
py
|
Python
|
examples/setuptools-rust-starter/tests/test_setuptools_rust_starter.py
|
FriendRat/pyo3
|
5446fe2062cb3bf11bf61bd4a2c58a7ed8b408d2
|
[
"Apache-2.0"
] | 1
|
2021-06-18T16:27:31.000Z
|
2021-06-18T16:27:31.000Z
|
examples/setuptools-rust-starter/tests/test_setuptools_rust_starter.py
|
FriendRat/pyo3
|
5446fe2062cb3bf11bf61bd4a2c58a7ed8b408d2
|
[
"Apache-2.0"
] | 5
|
2021-11-08T22:05:41.000Z
|
2022-03-28T22:07:04.000Z
|
examples/setuptools-rust-starter/tests/test_setuptools_rust_starter.py
|
FriendRat/pyo3
|
5446fe2062cb3bf11bf61bd4a2c58a7ed8b408d2
|
[
"Apache-2.0"
] | null | null | null |
from setuptools_rust_starter import PythonClass, ExampleClass
def test_python_class() -> None:
    py_class = PythonClass(value=10)
    assert py_class.value == 10


def test_example_class() -> None:
    example = ExampleClass(value=11)
    assert example.value == 11
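The two tests differ only in the class and the value, so they could also be written as one parametrized case. A sketch, assuming pytest is available alongside the installed setuptools_rust_starter package:

import pytest

from setuptools_rust_starter import ExampleClass, PythonClass


@pytest.mark.parametrize("cls, value", [(PythonClass, 10), (ExampleClass, 11)])
def test_value_roundtrip(cls, value) -> None:
    # Each class should report back the value it was constructed with.
    assert cls(value=value).value == value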
| 22.5
| 61
| 0.733333
| 35
| 270
| 5.428571
| 0.514286
| 0.073684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035874
| 0.174074
| 270
| 11
| 62
| 24.545455
| 0.816144
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 1
| 0.285714
| false
| 0
| 0.142857
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7caa2f54344b5d827f792813f87cde352f46a120
| 827
|
py
|
Python
|
StateGoHome.py
|
LHGames-2017/superintelligence
|
bd9ea3d444e571a0f9607bf0f6799807f7e644ca
|
[
"MIT"
] | null | null | null |
StateGoHome.py
|
LHGames-2017/superintelligence
|
bd9ea3d444e571a0f9607bf0f6799807f7e644ca
|
[
"MIT"
] | null | null | null |
StateGoHome.py
|
LHGames-2017/superintelligence
|
bd9ea3d444e571a0f9607bf0f6799807f7e644ca
|
[
"MIT"
] | null | null | null |
from PlayerState import *
from pathFinder import PathFinder
from StateLook4Resources import *
class StateGoHome(PlayerState):
    """ State Implementation: has a resource and goes back home """

    def __init__(self, player):
        self.player = player
        self.player.setTarget(self.player.playerData.HouseLocation)

    def doAction(self):
        origin = self.player.playerData.Position
        target = self.player.target
        moves = PathFinder(self.player.mapView).getPath(origin, target)
        # If player just gave the resource home, look 4 resources again
        if(not self.player.hasResources()):
            self.player.state = StateLook4Resources(self.player)
            return create_purchase_action(0)
        return create_move_action(moves[0])

    def toString(self):
        return "StateGoHome"
| 31.807692
| 71
| 0.689238
| 93
| 827
| 6.043011
| 0.505376
| 0.177936
| 0.05694
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007813
| 0.226119
| 827
| 25
| 72
| 33.08
| 0.870313
| 0.141475
| 0
| 0
| 0
| 0
| 0.015647
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.176471
| false
| 0
| 0.176471
| 0.058824
| 0.588235
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|
7caf56de8045038d74971a889dbed39c31d7bb50
| 1,306
|
py
|
Python
|
tests/python/gaia-ui-tests/gaiatest/tests/functional/lockscreen/test_lockscreen_unlock_to_camera_with_passcode.py
|
BReduardokramer/gaia
|
c00302cdcd435ab193e8365917cfc6abac9e4f2e
|
[
"Apache-2.0"
] | 1
|
2021-11-09T00:27:34.000Z
|
2021-11-09T00:27:34.000Z
|
tests/python/gaia-ui-tests/gaiatest/tests/functional/lockscreen/test_lockscreen_unlock_to_camera_with_passcode.py
|
AmyYLee/gaia
|
a5dbae8235163d7f985bdeb7d649268f02749a8b
|
[
"Apache-2.0"
] | null | null | null |
tests/python/gaia-ui-tests/gaiatest/tests/functional/lockscreen/test_lockscreen_unlock_to_camera_with_passcode.py
|
AmyYLee/gaia
|
a5dbae8235163d7f985bdeb7d649268f02749a8b
|
[
"Apache-2.0"
] | null | null | null |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from gaiatest import GaiaTestCase
from gaiatest.apps.lockscreen.app import LockScreen
class TestCameraUnlockWithPasscode(GaiaTestCase):

    # Input data
    _input_passcode = '7931'

    def setUp(self):
        GaiaTestCase.setUp(self)

        # Turn off geolocation prompt
        self.apps.set_permission('System', 'geolocation', 'deny')

        self.data_layer.set_setting('lockscreen.passcode-lock.code', self._input_passcode)
        self.data_layer.set_setting('lockscreen.passcode-lock.enabled', True)

        # this time we need it locked!
        self.lockscreen.lock()
        self.lock_screen = LockScreen(self.marionette)

    def test_unlock_to_camera_with_passcode(self):
        # https://github.com/mozilla/gaia-ui-tests/issues/479
        camera = self.lock_screen.unlock_to_camera()
        self.lock_screen.wait_for_lockscreen_not_visible()
        camera.switch_to_camera_frame()
        self.assertFalse(camera.is_gallery_button_visible)
        camera.tap_switch_source()
        camera.wait_for_capture_ready()
        self.assertFalse(camera.is_gallery_button_visible)
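The test relies on setUp to put the device into a known locked, passcode-protected state before each run. A generic unittest sketch of that fixture pattern, using illustrative stand-ins rather than gaiatest APIs:

import unittest


class FakeDevice(object):
    def __init__(self):
        self.settings = {}
        self.locked = False

    def lock(self):
        self.locked = True


class TestLockedDevice(unittest.TestCase):
    def setUp(self):
        # Arrange the precondition every test in the class assumes.
        self.device = FakeDevice()
        self.device.settings['lockscreen.passcode-lock.enabled'] = True
        self.device.lock()

    def test_starts_locked(self):
        self.assertTrue(self.device.locked)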
| 31.095238
| 90
| 0.717458
| 173
| 1,306
| 5.202312
| 0.520231
| 0.026667
| 0.046667
| 0.035556
| 0.195556
| 0.195556
| 0.195556
| 0.1
| 0
| 0
| 0
| 0.010427
| 0.19219
| 1,306
| 41
| 91
| 31.853659
| 0.842654
| 0.238897
| 0
| 0.105263
| 0
| 0
| 0.087221
| 0.061866
| 0
| 0
| 0
| 0
| 0.105263
| 1
| 0.105263
| false
| 0.263158
| 0.105263
| 0
| 0.315789
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
7cb2d3d2cb22c43c3c911d744e22c33bc37cdf49
| 1,661
|
py
|
Python
|
landing/views.py
|
theflatladder/kyrsovaya
|
d6d661854cd955e544a199e201f325decc360cc1
|
[
"MIT"
] | null | null | null |
landing/views.py
|
theflatladder/kyrsovaya
|
d6d661854cd955e544a199e201f325decc360cc1
|
[
"MIT"
] | null | null | null |
landing/views.py
|
theflatladder/kyrsovaya
|
d6d661854cd955e544a199e201f325decc360cc1
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, render_to_response, redirect
from django.contrib import auth
from django.contrib.auth.forms import UserCreationForm
from django.template.context_processors import csrf
from django.http import HttpResponseRedirect
def login(request):
    args = {}
    args.update(csrf(request))
    if request.POST:
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = auth.authenticate(username=username, password=password)
        if user is not None:
            auth.login(request, user)
            return redirect('/main')
        else:
            args['login_error'] = "User not found, or the password entered is incorrect"
            return render_to_response('login.html', args)
    else:
        return render_to_response('login.html', args)


def reg(request):
    auth.logout(request)
    error = ''
    if request.method == "POST":
        newuser_form = UserCreationForm(data=request.POST)
        if newuser_form.is_valid():
            newuser_form.save()
            newuser = auth.authenticate(username=newuser_form.cleaned_data['username'], password=newuser_form.cleaned_data['password1'])
            auth.login(request, newuser)
            return redirect('/main')
        else:
            error = 'Check that the data you entered is correct.'
    else:
        newuser_form = UserCreationForm()
    return render(request, 'reg.html', locals())


def main(request):
    return render(request, 'index.html', {'username': auth.get_user(request).username})


def logout(request):
    auth.logout(request)
    return HttpResponseRedirect("/login")
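These views redirect to hard-coded paths ('/main', '/login'), which implies a URLconf along the following lines. A hedged sketch for the same Django era as the code above; the module path landing.views comes from the record's file path, the rest is assumption:

from django.conf.urls import url

from landing import views

urlpatterns = [
    url(r'^login/$', views.login),
    url(r'^reg/$', views.reg),
    url(r'^main/$', views.main),
    url(r'^logout/$', views.logout),
]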
| 31.339623
| 140
| 0.668874
| 186
| 1,661
| 5.876344
| 0.327957
| 0.060384
| 0.043916
| 0.040256
| 0.064044
| 0.064044
| 0.064044
| 0
| 0
| 0
| 0
| 0.000777
| 0.225166
| 1,661
| 52
| 141
| 31.942308
| 0.848485
| 0
| 0
| 0.25
| 0
| 0
| 0.123494
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0.075
| 0.125
| 0.025
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
7cb439e7ed9a5e950d6cf894c40e5a62043d06e9
| 5,183
|
py
|
Python
|
vendor/packages/translate-toolkit/translate/convert/test_po2tmx.py
|
jgmize/kitsune
|
8f23727a9c7fcdd05afc86886f0134fb08d9a2f0
|
[
"BSD-3-Clause"
] | 2
|
2019-08-19T17:08:47.000Z
|
2019-10-05T11:37:02.000Z
|
vendor/packages/translate-toolkit/translate/convert/test_po2tmx.py
|
jgmize/kitsune
|
8f23727a9c7fcdd05afc86886f0134fb08d9a2f0
|
[
"BSD-3-Clause"
] | null | null | null |
vendor/packages/translate-toolkit/translate/convert/test_po2tmx.py
|
jgmize/kitsune
|
8f23727a9c7fcdd05afc86886f0134fb08d9a2f0
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from translate.convert import po2tmx
from translate.convert import test_convert
from translate.misc import wStringIO
from translate.storage import tmx
from translate.storage import lisa
class TestPO2TMX:

    def po2tmx(self, posource, sourcelanguage='en', targetlanguage='af'):
        """helper that converts po source to tmx source without requiring files"""
        inputfile = wStringIO.StringIO(posource)
        outputfile = wStringIO.StringIO()
        outputfile.tmxfile = tmx.tmxfile(inputfile=None, sourcelanguage=sourcelanguage)
        po2tmx.convertpo(inputfile, outputfile, templatefile=None, sourcelanguage=sourcelanguage, targetlanguage=targetlanguage)
        return outputfile.tmxfile

    def test_basic(self):
        minipo = r"""# Afrikaans translation of program ABC
#
msgid ""
msgstr ""
"Project-Id-Version: program 2.1-branch\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2006-01-09 07:15+0100\n"
"PO-Revision-Date: 2004-03-30 17:02+0200\n"
"Last-Translator: Zuza Software Foundation <[email protected]>\n"
"Language-Team: Afrikaans <[email protected]>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"

# Please remember to do something
#: ../dir/file.xml.in.h:1 ../dir/file2.xml.in.h:4
msgid "Applications"
msgstr "Toepassings"
"""
        tmx = self.po2tmx(minipo)
        print "The generated xml:"
        print str(tmx)
        assert tmx.translate("Applications") == "Toepassings"
        assert tmx.translate("bla") is None
        xmltext = str(tmx)
        assert xmltext.index('creationtool="Translate Toolkit - po2tmx"')
        assert xmltext.index('adminlang')
        assert xmltext.index('creationtoolversion')
        assert xmltext.index('datatype')
        assert xmltext.index('o-tmf')
        assert xmltext.index('segtype')
        assert xmltext.index('srclang')

    def test_sourcelanguage(self):
        minipo = 'msgid "String"\nmsgstr "String"\n'
        tmx = self.po2tmx(minipo, sourcelanguage="xh")
        print "The generated xml:"
        print str(tmx)
        header = tmx.document.find("header")
        assert header.get("srclang") == "xh"

    def test_targetlanguage(self):
        minipo = 'msgid "String"\nmsgstr "String"\n'
        tmx = self.po2tmx(minipo, targetlanguage="xh")
        print "The generated xml:"
        print str(tmx)
        tuv = tmx.document.findall(".//%s" % tmx.namespaced("tuv"))[1]
        # tag[0] will be the source, we want the target tuv
        assert tuv.get("{%s}lang" % lisa.XML_NS) == "xh"

    def test_multiline(self):
        """Test multiline po entry"""
        minipo = r'''msgid "First part "
"and extra"
msgstr "Eerste deel "
"en ekstra"'''
        tmx = self.po2tmx(minipo)
        print "The generated xml:"
        print str(tmx)
        assert tmx.translate('First part and extra') == 'Eerste deel en ekstra'

    def test_escapednewlines(self):
        """Test the escaping of newlines"""
        minipo = r'''msgid "First line\nSecond line"
msgstr "Eerste lyn\nTweede lyn"
'''
        tmx = self.po2tmx(minipo)
        print "The generated xml:"
        print str(tmx)
        assert tmx.translate("First line\nSecond line") == "Eerste lyn\nTweede lyn"

    def test_escapedtabs(self):
        """Test the escaping of tabs"""
        minipo = r'''msgid "First column\tSecond column"
msgstr "Eerste kolom\tTweede kolom"
'''
        tmx = self.po2tmx(minipo)
        print "The generated xml:"
        print str(tmx)
        assert tmx.translate("First column\tSecond column") == "Eerste kolom\tTweede kolom"

    def test_escapedquotes(self):
        """Test the escaping of quotes (and slash)"""
        minipo = r'''msgid "Hello \"Everyone\""
msgstr "Good day \"All\""

msgid "Use \\\"."
msgstr "Gebruik \\\"."
'''
        tmx = self.po2tmx(minipo)
        print "The generated xml:"
        print str(tmx)
        assert tmx.translate('Hello "Everyone"') == 'Good day "All"'
        assert tmx.translate(r'Use \".') == r'Gebruik \".'

    def test_exclusions(self):
        """Test that empty and fuzzy messages are excluded"""
        minipo = r'''#, fuzzy
msgid "One"
msgstr "Een"

msgid "Two"
msgstr ""

msgid ""
msgstr "Drie"
'''
        tmx = self.po2tmx(minipo)
        print "The generated xml:"
        print str(tmx)
        assert "<tu" not in str(tmx)
        assert len(tmx.units) == 0

    def test_nonascii(self):
        """Tests that non-ascii conversion works."""
        minipo = r'''msgid "Bézier curve"
msgstr "Bézier-kurwe"
'''
        tmx = self.po2tmx(minipo)
        print str(tmx)
        assert tmx.translate(u"Bézier curve") == u"Bézier-kurwe"


class TestPO2TMXCommand(test_convert.TestConvertCommand, TestPO2TMX):
    """Tests running actual po2tmx commands on files"""
    convertmodule = po2tmx

    def test_help(self):
        """tests getting help"""
        options = test_convert.TestConvertCommand.test_help(self)
        options = self.help_check(options, "-l LANG, --language=LANG")
        options = self.help_check(options, "--source-language=LANG", last=True)
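Outside the test classes, the po2tmx helper above can be exercised directly. A hedged usage sketch in the same Python 2 style as the file, assuming the converter behaves as in test_basic above:

minipo = 'msgid "Hello"\nmsgstr "Hallo"\n'
tmxfile = TestPO2TMX().po2tmx(minipo, sourcelanguage='en', targetlanguage='af')
# The returned tmx store can be queried like any other translation store.
print str(tmxfile)
assert tmxfile.translate("Hello") == "Hallo"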
| 33.43871
| 128
| 0.641134
| 636
| 5,183
| 5.198113
| 0.349057
| 0.019964
| 0.03539
| 0.051724
| 0.22686
| 0.185723
| 0.176951
| 0.176951
| 0.156987
| 0.156987
| 0
| 0.015672
| 0.224387
| 5,183
| 154
| 129
| 33.655844
| 0.806716
| 0.017557
| 0
| 0.289256
| 0
| 0.016529
| 0.349851
| 0.045087
| 0
| 0
| 0
| 0
| 0.157025
| 0
| null | null | 0.016529
| 0.041322
| null | null | 0.140496
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7cb5817de3a17f08a3afdfbe15a3bbd0fbe2d1d8
| 346
|
py
|
Python
|
setup.py
|
GeorgeDittmar/MarkovTextGenerator
|
df6a56e23051e1f263ba22889dc3b5d0dc03e370
|
[
"Apache-2.0"
] | 1
|
2021-11-26T15:49:31.000Z
|
2021-11-26T15:49:31.000Z
|
setup.py
|
GeorgeDittmar/Mimic
|
df6a56e23051e1f263ba22889dc3b5d0dc03e370
|
[
"Apache-2.0"
] | 1
|
2019-06-24T17:30:41.000Z
|
2019-06-26T04:53:00.000Z
|
setup.py
|
GeorgeDittmar/MarkovTextGenerator
|
df6a56e23051e1f263ba22889dc3b5d0dc03e370
|
[
"Apache-2.0"
] | 2
|
2020-05-04T07:57:17.000Z
|
2021-02-23T05:10:11.000Z
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='Mimik',
      version='1.0',
      description='Python framework for markov models',
      author='George Dittmar',
      author_email='[email protected]',
      url='https://www.python.org/sigs/distutils-sig/',
      packages=['distutils', 'distutils.command'],
      )
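With that file saved as setup.py, a source distribution is built in the usual distutils way. A sketch of the expected invocations, assuming nothing beyond the stock toolchain:

# Run from the project root; produces dist/Mimik-1.0.tar.gz
#   python setup.py sdist
# Install into the current environment:
#   python setup.py install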
| 26.615385
| 55
| 0.65896
| 41
| 346
| 5.536585
| 0.829268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007092
| 0.184971
| 346
| 12
| 56
| 28.833333
| 0.797872
| 0.057803
| 0
| 0
| 0
| 0
| 0.452308
| 0.070769
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7cb6009fc34f03127073ead641d466f1b2a5c978
| 2,313
|
py
|
Python
|
app/search/hot_eval/hl_reportable.py
|
don4apaev/anfisa
|
2e4bdd83c584c0000f037413ccc1f9067c07fa70
|
[
"Apache-2.0"
] | null | null | null |
app/search/hot_eval/hl_reportable.py
|
don4apaev/anfisa
|
2e4bdd83c584c0000f037413ccc1f9067c07fa70
|
[
"Apache-2.0"
] | null | null | null |
app/search/hot_eval/hl_reportable.py
|
don4apaev/anfisa
|
2e4bdd83c584c0000f037413ccc1f9067c07fa70
|
[
"Apache-2.0"
] | null | null | null |
def evalRec(env, rec):
    """hl_reportable"""
    return (len(set(rec.Genes) &
        {
            'ABHD12',
            'ACTG1',
            'ADGRV1',
            'AIFM1',
            'ATP6V1B1',
            'BCS1L',
            'BSND',
            'CABP2',
            'CACNA1D',
            'CDC14A',
            'CDH23',
            'CEACAM16',
            'CEP78',
            'CHD7',
            'CIB2',
            'CISD2',
            'CLDN14',
            'CLIC5',
            'CLPP',
            'CLRN1',
            'COCH',
            'COL11A2',
            'DIAPH1',
            'DIAPH3',
            'DMXL2',
            'DNMT1',
            'DSPP',
            'EDN3',
            'EDNRB',
            'EPS8',
            'EPS8L2',
            'ESPN',
            'ESRRB',
            'EYA1',
            'EYA4',
            'GIPC3',
            'GJB2',
            'GJB6',
            'GPSM2',
            'GRHL2',
            'GRXCR1',
            'GSDME',
            'HGF',
            'HSD17B4',
            'ILDR1',
            'KCNE1',
            'KCNQ1',
            'KCNQ4',
            'LARS2',
            'LHFPL5',
            'LOXHD1',
            'LRTOMT',
            'MARVELD2',
            'MIR96',
            'MITF',
            'MSRB3',
            'MT-RNR1',
            'MT-TS1',
            'MYH14',
            'MYH9',
            'MYO15A',
            'MYO3A',
            'MYO6',
            'MYO7A',
            'OSBPL2',
            'OTOA',
            'OTOF',
            'OTOG',
            'OTOGL',
            'P2RX2',
            'PAX3',
            'PDZD7',
            'PJVK',
            'POU3F4',
            'POU4F3',
            'PRPS1',
            'PTPRQ',
            'RDX',
            'RIPOR2',
            'S1PR2',
            'SERPINB6',
            'SIX1',
            'SLC17A8',
            'SLC26A4',
            'SLC52A2',
            'SLITRK6',
            'SMPX',
            'SOX10',
            'STRC',
            'SYNE4',
            'TBC1D24',
            'TECTA',
            'TIMM8A',
            'TMC1',
            'TMIE',
            'TMPRSS3',
            'TPRN',
            'TRIOBP',
            'TUBB4B',
            'USH1C',
            'USH1G',
            'USH2A',
            'WFS1',
            'WHRN',
        }
    ) > 0)
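evalRec only needs a rec object with a Genes attribute, so it can be smoke-tested with a tiny stub. A hedged sketch; FakeRec is illustrative, not one of Anfisa's real record classes:

class FakeRec(object):
    def __init__(self, genes):
        self.Genes = genes

# A record carrying at least one panel gene is reportable; one without is not.
assert evalRec(None, FakeRec(['GJB2', 'NOT_A_PANEL_GENE'])) is True
assert evalRec(None, FakeRec(['NOT_A_PANEL_GENE'])) is False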
| 21.027273
| 32
| 0.253783
| 118
| 2,313
| 4.966102
| 0.983051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118221
| 0.601383
| 2,313
| 110
| 33
| 21.027273
| 0.517354
| 0.00562
| 0
| 0
| 0
| 0
| 0.234858
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009174
| false
| 0
| 0
| 0
| 0.018349
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7cbd766d520e1888b731cf3cea3bb5f44d830c1f
| 520
|
py
|
Python
|
david/modules/artist/view.py
|
ktmud/david
|
4b8d6f804b73cdfa1a8ddf784077fa9a39f1e36f
|
[
"MIT"
] | 2
|
2016-04-07T08:21:32.000Z
|
2020-11-26T11:49:20.000Z
|
david/modules/artist/view.py
|
ktmud/david
|
4b8d6f804b73cdfa1a8ddf784077fa9a39f1e36f
|
[
"MIT"
] | null | null | null |
david/modules/artist/view.py
|
ktmud/david
|
4b8d6f804b73cdfa1a8ddf784077fa9a39f1e36f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from flask import Blueprint, request
from david.lib.template import st
from .model import Artist
bp = Blueprint('artist', __name__)
@bp.app_template_global('artists')
def artists():
    return Artist.query.all()


@bp.route('/artist/<uid>/')
def intro(uid):
    artist = Artist.get_or_404(uid)
    return st('modules/artist/show.html', **locals())


@bp.route('/artist/<uid>/detail')
def detail(uid):
    artist = Artist.get_or_404(uid)
    return st('modules/artist/detailed.html', **locals())
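The blueprint only becomes active once it is registered on an application. A minimal sketch, assuming the standard Flask pattern rather than anything specific to this project:

from flask import Flask

app = Flask(__name__)
# Mounts the '/artist/...' routes and the 'artists' template global.
app.register_blueprint(bp)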
| 23.636364
| 57
| 0.688462
| 74
| 520
| 4.702703
| 0.472973
| 0.04023
| 0.074713
| 0.091954
| 0.270115
| 0.270115
| 0.270115
| 0.270115
| 0.270115
| 0.270115
| 0
| 0.015556
| 0.134615
| 520
| 21
| 58
| 24.761905
| 0.757778
| 0.040385
| 0
| 0.133333
| 0
| 0
| 0.199195
| 0.104628
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0.066667
| 0.6
| 0.133333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 1
|