diff --git "a/1160.jsonl" "b/1160.jsonl" new file mode 100644--- /dev/null +++ "b/1160.jsonl" @@ -0,0 +1,409 @@ +{"seq_id": "35506749757", "text": "import tensorflow as tf\nfrom lib.architecture import Network\nfrom lib.optimization import *\n\nclass BuildLM(object):\n def __init__(self, graph, max_len, word_dim):\n self.network = Network(graph, max_len=max_len, word_dim=word_dim)\n self.loss = SoftmaxLoss(max_len=max_len, word_dim=word_dim)\n self.opt = Optimization()\n \n def build_lm_stacked_fc(self, graph):\n print(\"building lm_stacked_fc network...\")\n output = self.network.lm_stacked_fc_network()\n loss, perplexity = self.loss.cross_entropy(output)\n opt = self.opt.adam(loss, 0.002)\n # acc, train_summ, valid_summ = self.accuracy.sigmoid_accuracy(self.loss.labels, output)\n merged = tf.summary.merge_all()\n return output, loss, perplexity, opt, merged\n\n ", "repo_name": "kernelmachine/rnn-lm", "sub_path": "lib/build.py", "file_name": "build.py", "file_ext": "py", "file_size_in_byte": 789, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "lib.architecture.Network", "line_number": 7, "usage_type": "call"}, {"api_name": "tensorflow.summary.merge_all", "line_number": 17, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 17, "usage_type": "attribute"}]} +{"seq_id": "25719429725", "text": "\nimport googlemaps\nfrom key import GMAPS_KEY\n\ndef access_point():\n\n gmaps = googlemaps.Client(key = GMAPS_KEY)\n\n # sample cell tower\n celltowers = [{\n \"cellId\": 49761,\n \"locationAreaCode\": 26002,\n \"mobileCountryCode\": 20,\n \"mobileNetworkCode\": 10\n },\n {\n \"cellId\": 27111,\n \"locationAreaCode\": 1009,\n \"mobileCountryCode\": 204,\n \"mobileNetworkCode\": 16\n },\n {\n \"cellId\": 27161,\n \"locationAreaCode\": 1003,\n \"mobileCountryCode\": 204,\n \"mobileNetworkCode\": 16\n }\n ]\n\n locator = {\n \"homeMobileCountryCode\": 204,\n \"homeMobileNetworkCode\": 8,\n \"radioType\": \"lte\",\n \"carrier\": \"KPN\",\n \"considerIp\": \"true\",\n \"cellTowers\": {}\n }\n\n\n response = gmaps.geolocate(204, 8, 'gsm', 'Base/KPN', True, celltowers)\n print(response)\n\nif __name__ == '__main__':\n access_point()\n", "repo_name": "markmelnic/noise-pollution-mapper", "sub_path": "tests/test_access_point.py", "file_name": "test_access_point.py", "file_ext": "py", "file_size_in_byte": 919, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "33", "api": [{"api_name": "googlemaps.Client", "line_number": 7, "usage_type": "call"}, {"api_name": "key.GMAPS_KEY", "line_number": 7, "usage_type": "name"}]} +{"seq_id": "33098250188", "text": "import random\nimport matplotlib.pyplot as plt\n\ndef random_list(start,stop,length):\n jishu = 50 #基数的意思或作为迭代次数,不是灵活的\n list_x = []\n for i in range(1,jishu+1):\n list_x.append(i)\n print(\"横坐标:\",list_x)\n\n list_y = []\n for i in range(jishu): #这里的jishu是自定义的,不是灵活的\n print(\"这是第 \",i+1,\" 次迭代\")\n if start <= stop:\n start,stop = (int(start),int(stop))\n else:\n stop, start = (int(start), int(stop))\n #print(start,stop)\n\n list = []\n for i in range(length):\n #print(i)\n if i == 0:\n x = random.randint(start,stop)\n print(\"随机数是:\" , x)\n j = random.randint(start,stop)\n list.append(j)\n #print(list)\n num_x = list.count(x)\n print(\"随机数出现的次数:\",num_x)\n show_rate = num_x/length\n print(\"出现率:\",show_rate)\n\n list_y.append(show_rate)\n print(\"----------------------------------\")\n 
print(\"纵坐标:\",list_y)\n\n plt.plot(list_x, list_y, label='NM')\n # plt.plot(x2, y2, label='Second Line')\n plt.xlabel('The number of iteration') # 横坐标标题\n plt.ylabel('The rate of appearance') # 纵坐标标题\n plt.title('The Distribution of one number \\n in random list',loc=\"center\") #图像标题\n # plt.title('Interesting Graph\\nCheck it out')\n plt.legend() # 显示Fisrt Line和Second Line(label)的设置\n plt.savefig('C:/Users/zhengyong/Desktop/1.png')\n plt.show()\n\nrandom_list(1,10,10000)\n", "repo_name": "ndnmonkey/simple_algorithms", "sub_path": "随机分布.py", "file_name": "随机分布.py", "file_ext": "py", "file_size_in_byte": 1642, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "33", "api": [{"api_name": "random.randint", "line_number": 24, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}]} +{"seq_id": "21931449737", "text": "import os\r\nimport requests\r\nimport soundfile as sf\r\nimport speech_recognition as sr\r\nfrom aiogram import Bot\r\nfrom aiogram.types import ContentType, Message\r\nfrom aiogram.dispatcher import Dispatcher\r\nfrom aiogram.utils import executor\r\n\r\n\r\nwith open('token.txt', 'r') as token_file:\r\n token = token_file.read()\r\n\r\nbot = Bot(token=token)\r\ndp = Dispatcher(bot)\r\nr = sr.Recognizer()\r\n\r\n\r\n@dp.message_handler(content_types=[ContentType.TEXT])\r\nasync def text_message(message: Message):\r\n text = await message.text\r\n with open('answer.txt', 'w') as answer:\r\n answer.write(text)\r\n\r\n\r\n@dp.message_handler(content_types=[ContentType.VOICE])\r\nasync def voice_message(message: Message):\r\n voice = await message.voice.get_file()\r\n path = voice.file_path\r\n oga_audio = os.getcwd() + '\\\\' + os.path.basename(path)\r\n wav_audio = oga_audio.replace('.oga', '.wav', 1)\r\n doc = requests.get(f'https://api.telegram.org/file/bot{token}/{path}')\r\n with open(f'{oga_audio}', 'wb') as f:\r\n f.write(doc.content)\r\n data, samplerate = sf.read(oga_audio)\r\n sf.write(wav_audio, data, samplerate)\r\n audio = sr.AudioFile(wav_audio)\r\n with audio as source:\r\n audio = r.record(source)\r\n try:\r\n await message.answer(\"Обработка сообщения...\")\r\n text = r.recognize_google(audio, language='ru_RU')\r\n await message.answer(\"Сообщение:\")\r\n await message.answer(str(text))\r\n except sr.UnknownValueError as e:\r\n await message.answer(\"Не удалось распознать 
сообщение.\")\r\n os.remove(oga_audio)\r\n os.remove(wav_audio)\r\n\r\n\r\nif __name__ == '__main__':\r\n executor.start_polling(dp, skip_updates=True)\r\n\r\n", "repo_name": "Flyer-DM/little_projects", "sub_path": "Listener_BOT/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1751, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "aiogram.Bot", "line_number": 14, "usage_type": "call"}, {"api_name": "aiogram.dispatcher.Dispatcher", "line_number": 15, "usage_type": "call"}, {"api_name": "speech_recognition.Recognizer", "line_number": 16, "usage_type": "call"}, {"api_name": "aiogram.types.Message", "line_number": 20, "usage_type": "name"}, {"api_name": "aiogram.types.ContentType.TEXT", "line_number": 19, "usage_type": "attribute"}, {"api_name": "aiogram.types.ContentType", "line_number": 19, "usage_type": "name"}, {"api_name": "aiogram.types.Message", "line_number": 27, "usage_type": "name"}, {"api_name": "os.getcwd", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 32, "usage_type": "call"}, {"api_name": "soundfile.read", "line_number": 35, "usage_type": "call"}, {"api_name": "soundfile.write", "line_number": 36, "usage_type": "call"}, {"api_name": "speech_recognition.AudioFile", "line_number": 37, "usage_type": "call"}, {"api_name": "speech_recognition.UnknownValueError", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 47, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 48, "usage_type": "call"}, {"api_name": "aiogram.types.ContentType.VOICE", "line_number": 26, "usage_type": "attribute"}, {"api_name": "aiogram.types.ContentType", "line_number": 26, "usage_type": "name"}, {"api_name": "aiogram.utils.executor.start_polling", "line_number": 52, "usage_type": "call"}, {"api_name": "aiogram.utils.executor", "line_number": 52, "usage_type": "name"}]} +{"seq_id": "1283859695", "text": "from django import template\n\nfrom spineyolo.models import SpineData\n\nregister = template.Library()\n\n\n@register.simple_tag\ndef get_obj(pk, *attrs):\n try:\n \"\"\" Get object based on any number of attributes\n (attributes need to be children of one another) \"\"\"\n attrs = list(attrs)\n obj = getattr(SpineData.objects.get(pk=int(pk)), attrs.pop(0))\n for attr in attrs:\n obj = getattr(obj, attr, \"\")\n except ValueError:\n obj = \"\"\n return obj\n", "repo_name": "mikeusru/DjangoSpineYolo", "sub_path": "spineyolo/templatetags/template_tags.py", "file_name": "template_tags.py", "file_ext": "py", "file_size_in_byte": 499, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "django.template.Library", "line_number": 5, "usage_type": "call"}, {"api_name": "django.template", "line_number": 5, "usage_type": "name"}, {"api_name": "spineyolo.models.SpineData.objects.get", "line_number": 14, "usage_type": "call"}, {"api_name": "spineyolo.models.SpineData.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "spineyolo.models.SpineData", "line_number": 14, "usage_type": "name"}]} +{"seq_id": "24266001124", "text": "from flask import (Flask, url_for, request,\n render_template, make_response,\n session, escape, redirect)\n\napp = Flask(__name__)\napp.secret_key = 'A0Zr98j/3yX 
R~XHH!jmN]LWX/,?RT'\n\n@app.route('/hello')\n@app.route('/hello/<name>')\ndef hello_world(name=None):\n # return 'Hello world!'\n context = {'name': name}\n return render_template('hello.html', **context)\n\n@app.route('/')\ndef index():\n if 'session_id' in session and 'test_value' in session:\n return 'Logged in as {}, test value = {}'.\\\n format(escape(session['session_id']), \n escape(session['test_value']))\n return 'You are not logged in'\n\n@app.route('/post/<int:post_id>')\ndef show_post_id(post_id):\n return 'Post id = {}'.format(str(post_id))\n\n@app.route('/path/<path:path_value>')\ndef show_path_value(path_value):\n return 'Path : {}'.format(path_value)\n\n@app.route('/test')\ndef show_result():\n result = []\n result.append(url_for('hello_world'))\n result.append(url_for('show_post_id', post_id = 5566))\n result.append(url_for('show_path_value', path_value='so_strange_haha/1.0.2'))\n result.append(url_for('static', filename='style.css'))\n return '<br/>'.join(result)\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if request.method == 'POST':\n session['session_id'] = request.form['username'] \n session['test_value'] = request.form['test_value']\n return redirect(url_for('index'))\n return '''\n 
<form action=\"\" method=\"post\">\n <p><input type=\"text\" name=\"username\"></p>\n <p><input type=\"text\" name=\"test_value\"></p>\n <p><input type=\"submit\" value=\"Login\"></p>\n </form>
\n '''\n\n@app.route('/set_cookie')\ndef set_cookie():\n resp = make_response(render_template('hello.html', name='cookie'))\n resp.set_cookie('username', 'moonblack')\n return resp\n\n@app.route('/get_cookie')\ndef get_cookie():\n username = request.cookies.get('username', 'no_value')\n return username\n", "repo_name": "nk910216/flask_start_practice", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1987, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 17, "usage_type": "name"}, {"api_name": "flask.escape", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.escape", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 20, "usage_type": "name"}, {"api_name": "flask.url_for", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 42, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 42, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 43, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 43, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 43, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 44, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 44, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 44, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 56, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 56, "usage_type": "call"}, {"api_name": "flask.request.cookies.get", "line_number": 62, "usage_type": "call"}, {"api_name": "flask.request.cookies", "line_number": 62, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 62, "usage_type": "name"}]} +{"seq_id": "32691882745", "text": "#Simulating a self-avoiding walk on 2D square lattice\r\n\r\nimport numpy as np\r\nimport random\r\nfrom matplotlib import pyplot as plt\r\n\r\ndef take_a_step(position):\r\n \r\n #walker takes a step\r\n \r\n steps = [[-1,0],[1,0],[0,1],[0,-1]]\r\n new_step = np.array(random.choice(steps))\r\n new_pos = position + new_step\r\n \r\n return new_pos \r\n\r\n\r\nN = 30 #number of steps\r\n\r\nsteps_til_trap = np.array([])\r\nfor j in range(0,1): #this line allows for multiple SAWs to be produced at once\r\n\r\n x = np.array([[0,0]])\r\n pos = x\r\n pos_track = x\r\n a = 0\r\n for i in range(0,N):\r\n pos = pos_track[-1,:]\r\n pos = take_a_step(pos)\r\n \r\n #SELF - AVOIDANCE CONSTRAINT\r\n if np.any(np.all(pos_track == pos ,axis = 1) == True):\r\n continue\r\n else:\r\n pos_track = np.vstack((pos_track,pos))\r\n\r\nplt.figure(figsize = (6,6))\r\nfor i in range(1,len(pos_track[:,0])):\r\n plt.plot([pos_track[i-1,0],\r\n pos_track[i,0]],\r\n [pos_track[i-1,1],\r\n pos_track[i,1]],\r\n color = 
'k',\r\n linewidth = 2.5)\r\nplt.axis('equal')\r\nplt.grid(color='k', linestyle='--', linewidth=0.5,alpha = 0.4)\r\nplt.show()", "repo_name": "alex21347/Random_walk_generators", "sub_path": "simple_SAW.py", "file_name": "simple_SAW.py", "file_ext": "py", "file_size_in_byte": 1202, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "numpy.array", "line_number": 12, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}]} +{"seq_id": "24495248205", "text": "file = \"/home/recodera/MRI_dasa/MRI_02/Loper/Loper/LosiActa.pdf\"\n\ndef pdf_tab_id(file):\n from tabula import read_pdf\n from tabulate import tabulate\n import os\n import re\n import json\n import pathlib\n \n fce_path_file = pathlib.Path(file)\n fce_path = fce_path_file.parent\n print(f'fce_path is: {fce_path}')\n ls_df = read_pdf(file, pages=\"all\")\n r = re.compile(r\"[01][.]{1}\\d{2}\")\n ls_id = []\n for count, tb in enumerate(ls_df):\n ls_df[count].to_csv('df_csv.csv')\n with open('df_csv.csv') as f:\n s = f.read()\n dec = re.findall(r, s)\n ls_id.append(dec)\n\n js_str = json.dumps(ls_id)\n jsonFile = open(f\"{fce_path}/id.json\", \"w\")\n jsonFile.write(js_str)\n jsonFile.close()\n\npdf_tab_id(file)", "repo_name": "ReCodeRa/MRI_mirta", "sub_path": "pdf_f.py", "file_name": "pdf_f.py", "file_ext": "py", "file_size_in_byte": 775, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "pathlib.Path", "line_number": 11, "usage_type": "call"}, {"api_name": "tabula.read_pdf", "line_number": 14, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 15, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 21, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "36598593952", "text": "import matplotlib.pyplot as pyplot\n\n\nlabels = ('Python', 'Ruby', 'Julia', 'Java', 'PHP')\nindex = (1, 2, 3, 4, 5) # provides locations on x axis\nsizes = [45, 30, 15, 10, 22]\n\n# Set up the bark chart\npyplot.bar(index, sizes, tick_label=labels)\n\n# Configure the layout\npyplot.ylabel('Usage')\npyplot.xlabel('Programming Languages')\n\n# Display the chart\npyplot.show()\n", "repo_name": "uunnxx/how-to", "sub_path": "python/plot/bar_charts.py", "file_name": "bar_charts.py", 
"file_ext": "py", "file_size_in_byte": 364, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "33", "api": [{"api_name": "matplotlib.pyplot.bar", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "1333640831", "text": "from django import forms\nfrom .models import Comment\n# from django.contrib.auth.models import User\n\n\nclass CommentForm(forms.ModelForm):\n class Meta:\n model = Comment\n exclude = ['username', 'image']\n widgets = {\n 'content': forms.TextInput(attrs={'placeholder':'Add a comment'}) #names to be similar as the fieldnames\n }\n ", "repo_name": "viisualworks/instanoir", "sub_path": "gram/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 378, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "django.forms.ModelForm", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 6, "usage_type": "name"}, {"api_name": "models.Comment", "line_number": 8, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 11, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "17666936066", "text": "import socket, argparse, threading\n\n # Accept a new socket and receive the nickname\ndef socket_accept():\n\t\n\tconn, address = s.accept() # Three-way handshake - This is a TCP connection with the server\n\t# Receive the nickname\n\tnickname = conn.recv(1024)\n\t# Append address in clients list\n\tif address not in clients:\n\t\t#clients.append((address[0]+':'+str(address[1])))\n\t\tclients.append(conn)\n\tprint(\"Connection has been established | \" + \"IP \" + address[0] + \\\n\t\t\" | Port \" + str(address[1]) + ' ' + nickname)\n\t#client(conn)\n\t#conn.close()\n\t# Go for data\n\t# Start a thread to keep receiving client data\n\tthread_client = threading.Thread(target = data, args=[nickname, conn])\n\tthread_client.start()\n\ndef data(nickname, conn):\n\twhile True:\t\n\t\t# Reveive the data from client\n\t\tdata = conn.recv(4096)\n\t\t#conn.sendall(data)\n\t\tif data:\n\t\t\tprint(\"%s said %s\" % (nickname, repr(data)))\n\t\t\tbcast(conn, data, nickname) # Call for broadcast\n\t\t\t\t\t\t # I tried to put this function here instead of calling it but it don't work because it's\n\t\t\t\t\t\t # always on the same client socket and we need to change socket to send to other clients\n\t\t\t\t\t\t # sendto(data, address) also didn't work\n\t\tif not data:\t\n\t\t\tbreak\n\n # Broadcast \ndef bcast(client_socket, message, client_nickname):\n\t# We don't want to receive what we sent\t\n\tfor i in clients:\n\t\tif i == client_socket:\n\t\t\tpass\n\t\t# Sent my message to other clients\n\t\telse:\n\t\t\tprint(clients)\n\t\t\t#i.sendall(client_nickname)\n\t\t\t#i.sendall()\n\t\t\t# New line on the client side\n\t\t\ti.sendall(client_nickname + ': ' + message)\t\n\nif __name__ == 
'__main__':\n\t# Args for host and port\n\tparser = argparse.ArgumentParser(description='My TCP server')\n\tparser.add_argument('host', help='Interface the server listens at')\n\tparser.add_argument('-p', metavar='PORT', type=int, help='TCP PORT (default 9999)')\t\n\targs = parser.parse_args()\n\thost = args.host\n\tport = args.p\n\n\t # Client List\t\n\tclients = []\n\n\t # Create socket\n\ttry:\n\t\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\ts.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)\n\texcept socket.error as msg:\n\t\tprint(\"Socket creation error: \" + str(msg))\n\n\t # Bind the sockets\n\ttry:\n\t\tprint(\"Binding socket to port: \" + str(port))\n\t\ts.bind((host, port))\n\t\ts.listen(5) #number of connections\n\texcept socket.error as msg:\n\t\tprint(\"Socket binding error: \" + str(msg) + \"\\n\" + \"Retrying...\")\n\n\n\t\n\twhile True:\n\t\tsocket_accept()\n", "repo_name": "5peedmanual/SocketChat", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 2400, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "threading.Thread", "line_number": 19, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 52, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 64, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 64, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 64, "usage_type": "attribute"}, {"api_name": "socket.SOL_SOCKET", "line_number": 65, "usage_type": "attribute"}, {"api_name": "socket.SO_REUSEADDR", "line_number": 65, "usage_type": "attribute"}, {"api_name": "socket.error", "line_number": 66, "usage_type": "attribute"}, {"api_name": "socket.error", "line_number": 74, "usage_type": "attribute"}]} +{"seq_id": "40867723565", "text": "\"\"\" ukol-08: Prodej vstupenek\nVytvoř program na prodej vstupenek do letního kina. Ceny vstupenek jsou v tabulce níže.\n\nDatum\tCena\n1. 7. 2021 - 10. 8. 2021\t250 Kč\n11. 8. 2021 - 31. 8. 2021\t180 Kč\nMimo tato data je středisko zavřené.\n\nTvůj program se nejprve zeptá uživatele na datum a počet osob, pro které uživatel chce vstupenky koupit. Uživatel zadá datum ve středoevropském formátu. Převeď řetězec zadaný uživatelem na datum pomocí funkce datetime.strptime().\n\nPokud by uživatel zadal příjezd mimo otevírací dobu, vypiš, že letní kino je v té době uzavřené. Pokud je letní kino otevřené, spočítej a vypiš cenu za ubytování.\n\nData lze porovnávat pomocí známých operátorů <, >, <=, >=, ==, !=. Tyto operátory můžeš použít v podmínce if. Níže vidíš příklad porovnání dvou dat. 
Program vypíše text \"První datum je dřívější než druhé datum.\".\n\nfrom datetime import datetime\nprvni_udalost = datetime(2021, 7, 1)\ndruha_udalost = datetime(2021, 7, 3)\nif prvni_datum < druhe_datum:\n print(\"Druhá událost se stala po první události\") \"\"\"\n\nfrom datetime import datetime\n\ndatum = input('Zadejte datum, na které chcete pořídit vstupenky: ')\ndate = datetime.strptime(datum, '%d.%m.%Y')\n#print(date)\n\ndatum_otevreni_kina = datetime(2021,7,1)\ndatum_zmena_ceny = datetime(2021,8,10)\ndatum_uzavreni_kina = datetime(2021,8,31)\n\n#pro možnost výpisu otevírací doby ve středoevropském formátu:\noteviraji = datum_otevreni_kina.strftime('%d.%m.%Y') \nzaviraji = datum_uzavreni_kina.strftime('%d.%m.%Y')\n\ncena = 0\npocet_osob = 0\n\nif date < datum_otevreni_kina or date > datum_uzavreni_kina:\n print(f'V tento den je kino uzavřeno. Kino je otevřeno od {oteviraji} do {zaviraji}.')\nelse:\n pocet_osob = int(input('Zadejte počet vstupenek: '))\n if date <= datum_zmena_ceny:\n cena = 250\n else:\n cena = 180\n\n#print(cena)\n\ncelkova_cena = pocet_osob*cena\nif celkova_cena != 0:\n print(f'Cena za {pocet_osob} vstupenek je {celkova_cena} Kč.')", "repo_name": "LenkaDostalova/Python-jaro-22", "sub_path": "ukol-08_Vstupenky.py", "file_name": "ukol-08_Vstupenky.py", "file_ext": "py", "file_size_in_byte": 2034, "program_lang": "python", "lang": "cs", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "datetime.datetime.strptime", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "40151395215", "text": "import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n# 形态转换\n\nimg = cv2.imread(r'./pic/1.jpg')\n\n'''\n# (1)\nkernel_0 = np.ones((9, 9), np.uint8)\nkernel_1 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9, 9))\nkernel_2 = cv2.getStructuringElement(cv2.MORPH_CROSS, (9, 9))\nkernels = [kernel_0, kernel_1, kernel_2]\n\n# 腐蚀\n# plt.figure(figsize = (20, 20))\n# for i in range(3):\n# img_copy = img.copy()\n# img_copy = cv2.erode(img_copy, kernels[i], iterations = 3)\n# plt.subplot(1, 3, i+1)\n# plt.imshow(img_copy)\n# plt.axis('off')\n# plt.show()\n\n\n# (2)扩张\n# kernel = np.ones((9, 9), np.uint8)\n# img_dilate = cv2.dilate(img, kernel, iterations = 3)\n# plt.figure(figsize = (20, 10))\n# plt.subplot(1, 2, 1); plt.imshow(img, cmap=\"gray\")\n# plt.subplot(1, 2, 2); plt.imshow(img_dilate, cmap=\"gray\")\n# plt.show()\n'''\n\n\n\n# (3)\nkernel = np.ones((9, 9), np.uint8)\nimg_open = cv2.morphologyEx(img, op= cv2.MORPH_OPEN, kernel=kernel)\nimg_close = cv2.morphologyEx(img, op= cv2.MORPH_CLOSE, kernel=kernel)\nimg_grad = cv2.morphologyEx(img, op= cv2.MORPH_GRADIENT, kernel=kernel)\nimg_tophat = cv2.morphologyEx(img, op= cv2.MORPH_TOPHAT, kernel=kernel)\nimg_blackhat = cv2.morphologyEx(img, op= cv2.MORPH_BLACKHAT, kernel=kernel)\n# Plot the images\nimages = [img, img_open, img_close, img_grad,\n img_tophat, img_blackhat]\nfig, axs = plt.subplots(nrows = 2, ncols = 3, figsize = (15, 15))\nfor ind, p in enumerate(images):\n ax = axs[ind//3, ind%3]\n ax.imshow(p, cmap = 'gray')\n ax.axis('off')\nplt.show()", "repo_name": "Shawnlau63/OpenCVtest", "sub_path": "transform.py", "file_name": "transform.py", "file_ext": "py", 
"file_size_in_byte": 1533, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "cv2.imread", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 38, "usage_type": "attribute"}, {"api_name": "cv2.morphologyEx", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.MORPH_OPEN", "line_number": 39, "usage_type": "attribute"}, {"api_name": "cv2.morphologyEx", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.MORPH_CLOSE", "line_number": 40, "usage_type": "attribute"}, {"api_name": "cv2.morphologyEx", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.MORPH_GRADIENT", "line_number": 41, "usage_type": "attribute"}, {"api_name": "cv2.morphologyEx", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.MORPH_TOPHAT", "line_number": 42, "usage_type": "attribute"}, {"api_name": "cv2.morphologyEx", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.MORPH_BLACKHAT", "line_number": 43, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}]} +{"seq_id": "70862363615", "text": "from .db import get_connection\nfrom models.compra import Compra\n\nimport datetime\nmydb = get_connection()\n\nclass Carrito:\n\n def __init__(self, id_producto, cantidad, nombre_producto=None, sub_total=None, total=None, id_compra=None, id=None):\n self.id = id\n self.id_compra = id_compra \n self.id_producto = id_producto\n self.cantidad = cantidad\n self.nombre_producto = nombre_producto\n self.sub_total = sub_total\n self.total = total\n\n def save(self):\n # Create a New Object in DB\n if self.id is None:\n fecha_compra=datetime.datetime.now().strftime(\"%d-%m-%Y %H:%M:%S\")\n print(self.id_compra) \n if self.id_compra != None:\n compra = Compra.get(self.id_compra)\n else:\n print(\"Se crea la compra\")\n compra = Compra(fecha_compra) \n compra.save()\n with mydb.cursor() as cursor:\n sql = \"INSERT INTO carrito_productos(id_producto, id_compra, cantidad) VALUES(%s, %s, %s)\"\n val = (self.id_producto, compra.id_compra, self.cantidad)\n cursor.execute(sql, val)\n mydb.commit()\n self.id = cursor.lastrowid\n return self.id\n # Update an Object\n else:\n with mydb.cursor() as cursor:\n sql = \"UPDATE carrito_compra SET id_producto = %s, id_compra = %s, cantidad = %s WHERE id = %s\"\n val = (self.id_producto, self.id_compra, self.cantidad, self.id)\n cursor.execute(sql, val)\n mydb.commit()\n return self.id\n \n def delete(self):\n with mydb.cursor() as cursor:\n sql = f\"DELETE FROM carrito_productos WHERE id = { self.id }\"\n cursor.execute(sql)\n mydb.commit()\n return self.id\n \n @staticmethod\n def get(id):\n with mydb.cursor(dictionary=True) as cursor:\n sql = f\"SELECT id_producto, id_compra, cantidad FROM carrito_productos WHERE id = { id }\"\n cursor.execute(sql)\n result = cursor.fetchone()\n print(result)\n carrito_productos = Carrito(result[\"id_producto\"], result[\"id_compra\"], result[\"cantidad\"], id)\n return carrito_productos\n \n @staticmethod\n def get_all(id_compra):\n carrito_productos = []\n with mydb.cursor(dictionary=True) as cursor:\n sql = f\"SELECT c.id , c.id_producto, c.id_compra, c.cantidad, 
p.nombre_producto, p.precio_producto FROM carrito_productos c inner join producto p on p.id_producto = c.id_producto where id_compra = { id_compra } ;\"\n cursor.execute(sql)\n result = cursor.fetchall()\n for item in result:\n cantidad = item[\"cantidad\"]\n subtotal = item[\"precio_producto\"] * cantidad\n total = subtotal + (subtotal * 0.16)\n carrito_productos.append(Carrito(item[\"id_producto\"], item[\"cantidad\"],item[\"nombre_producto\"], subtotal, total, item[\"id_compra\"], item[\"id\"]))\n return carrito_productos\n \n @staticmethod\n def count_all():\n with mydb.cursor() as cursor:\n sql = f\"SELECT COUNT(id) FROM carrito_productos\"\n cursor.execute(sql)\n result = cursor.fetchone()\n return result[0]\n \n def __str__(self):\n return f\"{ self.id } - { self.id_producto }\"", "repo_name": "Eduardo188s/Abarrotes_Maiker", "sub_path": "app/models/carrito.py", "file_name": "carrito.py", "file_ext": "py", "file_size_in_byte": 3504, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "db.get_connection", "line_number": 5, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 21, "usage_type": "attribute"}, {"api_name": "models.compra.Compra.get", "line_number": 24, "usage_type": "call"}, {"api_name": "models.compra.Compra", "line_number": 24, "usage_type": "name"}, {"api_name": "models.compra.Compra", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "672100151", "text": "# Импортируем библиотеку, соответствующую типу нашей базы данных \nimport sqlite3\n\n# Создаем соединение с нашей базой данных\n# В нашем примере у нас это просто файл базы\nconn = sqlite3.connect('Chinook_Sqlite.sqlite')\n\n# Создаем курсор - это специальный объект который делает запросы и получает их результаты\ncursor = conn.cursor()\n\n# Делаем SELECT запрос к базе данных, используя обычный SQL-синтаксис\ncursor.execute(\"SELECT Name FROM Artist ORDER BY Name LIMIT 3\")\n\n# Получаем результат сделанного запроса\nresults = cursor.fetchall()\nresults2 = cursor.fetchall()\n\nprint(results) # [('A Cor Do Som',), ('Aaron Copland & London Symphony Orchestra',), ('Aaron Goldberg',)]\nprint(results2) # []\n\n# Делаем INSERT запрос к базе данных, используя обычный SQL-синтаксис\ncursor.execute(\"insert into Artist values (Null, 'A Aagrh!') \")\n\n# Если мы не просто читаем, но и вносим изменения в базу данных - необходимо сохранить транзакцию\nconn.commit()\n\n# Проверяем результат\ncursor.execute(\"SELECT Name FROM Artist ORDER BY Name LIMIT 3\")\nresults = cursor.fetchall()\nprint(results) # [('A Aagrh!',), ('A Cor Do Som',), ('Aaron Copland & London Symphony Orchestra',)]\n\n# Не забываем закрыть соединение с базой данных\nconn.close()", "repo_name": "burakadd/repository", "sub_path": "LearningSQL/sqlite.py", "file_name": "sqlite.py", "file_ext": "py", "file_size_in_byte": 1683, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "sqlite3.connect", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "23672517268", "text": "import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib as mpl\r\nimport sklearn\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nimport joblib\r\n#中文显示\r\nmpl.rcParams['font.sans-serif'] = [u'SimHei']\r\nmpl.rcParams['axes.unicode_minus'] = 
False\r\n\r\n#path=\"demo.xlsx\"\r\npath=\"rundata.xlsx\"\r\n#k折验证\r\nk=10\r\ndata=pd.read_excel(path) #数据读入\r\nname=list(data.columns.values)\r\nData=np.array(data)\r\nnp.random.shuffle(Data)\r\nnum_val_sample=len(Data)//k\r\nfeature_num=22\r\nprint(Data.shape)\r\nscore_list=[]\r\nbest_forest=RandomForestRegressor(n_estimators=10,max_leaf_nodes=17)\r\nbest_score=0\r\nbest_score_list=[]\r\n#空值处理\r\nfor i in range(k):\r\n x_test=Data[i*num_val_sample:(i+1)*num_val_sample,0:feature_num]\r\n y_test=Data[i*num_val_sample:(i+1)*num_val_sample,feature_num:]\r\n x_train=np.concatenate([Data[:i*num_val_sample,0:feature_num],Data[(i+1)*num_val_sample:,0:feature_num]],axis=0)\r\n y_train = np.concatenate([Data[:i * num_val_sample, feature_num:], Data[(i + 1) * num_val_sample:, feature_num:]], axis=0)\r\n #随机森林预测\r\n forest=RandomForestRegressor(n_estimators=150,max_leaf_nodes=10)\r\n forest.fit(x_train,y_train)\r\n y_pred=forest.predict(x_test)\r\n score=sklearn.metrics.r2_score(y_test, y_pred)\r\n if best_score\\n\"\"\".format(\r\n self.name, self.expected, self.status)\r\n a = self.action.toXml()\r\n return \"\".join((n, a, \"</TestStep>\\n\")) \r\n\r\n def toHtml(self, short=True, cssClass=None):\r\n \"\"\"Return a HTML representation of the TestStep.\"\"\"\r\n if short:\r\n return self._shortHtml(cssClass)\r\n else:\r\n return self._longHtml(cssClass)\r\n\r\n def _shortHtml(self, cls):\r\n \"\"\"Returns the short (table row) HTML representation of TestStep.\"\"\"\r\n if cls:\r\n r = \"\"\"<tr class=\"{}\">\"\"\".format(cls)\r\n else:\r\n r = \"<tr>\"\r\n n = \"<td>{}</td>\".format(self.name) \r\n a = \"<td>{}</td>\".format(str(self.action))\r\n e = \"<td>{}</td>\".format(self.expected)\r\n s = \"<td>{}</td>\".format(self.status)\r\n return \"\".join((r, n, a, e, s, \"</tr>\"))\r\n\r\n def _longHtml(self, cls):\r\n \"\"\"Returns the longer and prettier HTML representation of TestStep.\"\"\"\r\n if cls:\r\n d = \"\"\"<div class=\"{}\">\"\"\".format(cls)\r\n else:\r\n d = \"<div>\"\r\n n = \"<h4>{}</h4>\".format(self.name)\r\n a = \"{}<br>\".format(str(self.action))\r\n e = \"Expected status: {}<br>\".format(self.expected)\r\n s = \"Status: {}<br>\".format(self.status)\r\n o = \"Output:<br>\\n<pre>{}</pre>\".format(self.action.output)\r\n return \"\\n\".join((d, n, a, e, s, o, \"</div>
\"))\r\n\r\n def _evaluate(self):\r\n \"\"\"Evaluates the execution of the test step.\"\"\"\r\n self.status = TestStatus.FAIL\r\n # we evaluate the action's return code value\r\n if self.action.returncode == 0:\r\n status = TestStatus.PASS \r\n else: \r\n status = TestStatus.FAIL \r\n # now we compare it to expected value\r\n # test case passes only when: \r\n # 1. expected=pass and status=pass\r\n # 2. expected=expected-fail and status=fail\r\n if (status == TestStatus.PASS and self.expected == TestStatus.PASS):\r\n self.status = status\r\n elif (status == TestStatus.FAIL and self.expected == TestStatus.XFAIL):\r\n self.status = TestStatus.PASS\r\n return self.status\r\n\r\n def execute(self, **kwargs):\r\n \"\"\" \"\"\"\r\n self.action.execute(**kwargs)\r\n if self.action.isAutomated():\r\n return self._evaluate(), self.action.output\r\n else:\r\n return TestStatus.NOT_TESTED, \"\"\r\n\r\nclass _TestStepJsonEncoder(json.JSONEncoder):\r\n \"\"\"Custom JSON encoder for TestStep class\"\"\"\r\n\r\n def default(self, obj):\r\n if isinstance(obj, TestStep):\r\n d = dict()\r\n d[\"name\"] = obj.name\r\n d[\"expected\"] = str(obj.expected)\r\n d[\"action\"] = obj.action.toJson()\r\n d[\"status\"] = str(obj.status)\r\n return d\r\n return json.JSONEncoder.default(self, obj)\r\n\r\nclass TestStepJsonDecoder(json.JSONDecoder):\r\n\r\n def decode(self, jsontext):\r\n stepDict = json.loads(jsontext)\r\n expected = TestStatus.NOT_TESTED\r\n name = \"Untitled test step\"\r\n if \"name\" in stepDict:\r\n name = stepDict[\"name\"]\r\n if \"expected\" in stepDict:\r\n expected = toTestStatus(stepDict[\"expected\"])\r\n if \"action\" in stepDict:\r\n action = ActionJsonDecoder().decode(stepDict[\"action\"])\r\n if \"status\" in stepDict:\r\n status = toTestStatus(stepDict[\"status\"])\r\n else:\r\n raise Error(\"TestStep needs an action...\")\r\n return TestStep(name, action, expected, status) \r\n \r\n# TESTING ####################################################################\r\ndef runtests():\r\n print( \"Starting unit tests...\")\r\n s = TestStep(\"a test step\")\r\n print((str(s)))\r\n print(\">>>>>>>>>>>>>>>>>>>>>>>>>>>\")\r\n act = AutomatedAction(\"test/scripts/test.py\", \"arg1\")\r\n s = TestStep(\"a different test step\", act, TestStatus.XFAIL)\r\n j = s.toJson()\r\n print(j)\r\n ts = TestStepJsonDecoder().decode(j)\r\n print((\"type: {} \\ndata='{}'\".format(type(ts), ts)))\r\n print(\"Executing...\")\r\n res, output = s.execute()\r\n print((str(s)))\r\n print((\"RC={} Output:\\n'{}'\".format(str(res), output)))\r\n print((\"XML='{}'\".format(s.toXml())))\r\n print(\">>>>>>>>>>>>>>>>>>>>>>>>>>>\")\r\n act2 = AutomatedAction(\"test/scripts/test.py\")\r\n s2 = TestStep(\"another test step\", act2, TestStatus.PASS)\r\n print(\"Executing again...\")\r\n rs, out = s2.execute()\r\n print((str(s2)))\r\n print((\"RC={} Output:\\n'{}'\".format(str(rs), out)))\r\n print(\">>>>>>>>>>>>>>>>>>>>>>>>>>>\")\r\n act = AutomatedAction(\"test/scripts/tet.py\")\r\n s2 = TestStep(\"false test step\", act, TestStatus.XFAIL)\r\n print(\"Executing again...\")\r\n rs, out = s2.execute()\r\n print((str(s2)))\r\n print((\"RC={} Output:\\n'{}'\".format(str(rs), out)))\r\n print((\"XML='{}'\".format(s2.toXml())))\r\n print((\"HTML='{}'\".format(s2.toHtml())))\r\n print((\"HTML='{}'\".format(s2.toHtml(False))))\r\n print((\"HTML='{}'\".format(s2.toHtml(cssClass=\".teststep\"))))\r\n print((\"HTML='{}'\".format(s2.toHtml(False, cssClass=\".teststep\"))))\r\n print(\">>>>>>>>>>>>>>>>>>>>>>>>>>>\")\r\n 
print(\"Stop\")\r\n\r\nif __name__ == '__main__':\r\n print(__doc__)\r\n runtests()\r\n", "repo_name": "mraitmaier/pyrus", "sub_path": "core/teststep.py", "file_name": "teststep.py", "file_ext": "py", "file_size_in_byte": 7570, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "pyrus.core.action.NoOpAction", "line_number": 27, "usage_type": "call"}, {"api_name": "pyrus.core.teststatus.TestStatus.PASS", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pyrus.core.teststatus.TestStatus", "line_number": 28, "usage_type": "name"}, {"api_name": "pyrus.core.teststatus.TestStatus.NOT_TESTED", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pyrus.core.teststatus.TestStatus", "line_number": 29, "usage_type": "name"}, {"api_name": "pyrus.core.action.NoOpAction", "line_number": 49, "usage_type": "name"}, {"api_name": "pyrus.core.action.ManualAction", "line_number": 49, "usage_type": "name"}, {"api_name": "pyrus.core.action.AutomatedAction", "line_number": 49, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 70, "usage_type": "call"}, {"api_name": "pyrus.core.teststatus.TestStatus.FAIL", "line_number": 118, "usage_type": "attribute"}, {"api_name": "pyrus.core.teststatus.TestStatus", "line_number": 118, "usage_type": "name"}, {"api_name": "pyrus.core.teststatus.TestStatus.PASS", "line_number": 121, "usage_type": "attribute"}, {"api_name": "pyrus.core.teststatus.TestStatus", "line_number": 121, "usage_type": "name"}, {"api_name": "pyrus.core.teststatus.TestStatus.FAIL", "line_number": 123, "usage_type": "attribute"}, {"api_name": "pyrus.core.teststatus.TestStatus", "line_number": 123, "usage_type": "name"}, {"api_name": "pyrus.core.teststatus.TestStatus.PASS", "line_number": 128, "usage_type": "attribute"}, {"api_name": "pyrus.core.teststatus.TestStatus", "line_number": 128, "usage_type": "name"}, {"api_name": "pyrus.core.teststatus.TestStatus.FAIL", "line_number": 130, "usage_type": "attribute"}, {"api_name": "pyrus.core.teststatus.TestStatus", "line_number": 130, "usage_type": "name"}, {"api_name": "pyrus.core.teststatus.TestStatus.XFAIL", "line_number": 130, "usage_type": "attribute"}, {"api_name": "pyrus.core.teststatus.TestStatus.PASS", "line_number": 131, "usage_type": "attribute"}, {"api_name": "pyrus.core.teststatus.TestStatus", "line_number": 131, "usage_type": "name"}, {"api_name": "pyrus.core.teststatus.TestStatus.NOT_TESTED", "line_number": 140, "usage_type": "attribute"}, {"api_name": "pyrus.core.teststatus.TestStatus", "line_number": 140, "usage_type": "name"}, {"api_name": "json.JSONEncoder", "line_number": 142, "usage_type": "attribute"}, {"api_name": "json.JSONEncoder.default", "line_number": 153, "usage_type": "call"}, {"api_name": "json.JSONEncoder", "line_number": 153, "usage_type": "attribute"}, {"api_name": "json.JSONDecoder", "line_number": 155, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 158, "usage_type": "call"}, {"api_name": "pyrus.core.teststatus.TestStatus.NOT_TESTED", "line_number": 159, "usage_type": "attribute"}, {"api_name": "pyrus.core.teststatus.TestStatus", "line_number": 159, "usage_type": "name"}, {"api_name": "pyrus.core.teststatus.toTestStatus", "line_number": 164, "usage_type": "call"}, {"api_name": "pyrus.core.action.ActionJsonDecoder", "line_number": 166, "usage_type": "call"}, {"api_name": "pyrus.core.teststatus.toTestStatus", "line_number": 168, "usage_type": "call"}, {"api_name": "pyrus.core.error.Error", 
"line_number": 170, "usage_type": "call"}, {"api_name": "pyrus.core.action.AutomatedAction", "line_number": 179, "usage_type": "call"}, {"api_name": "pyrus.core.teststatus.TestStatus.XFAIL", "line_number": 180, "usage_type": "attribute"}, {"api_name": "pyrus.core.teststatus.TestStatus", "line_number": 180, "usage_type": "name"}, {"api_name": "pyrus.core.action.AutomatedAction", "line_number": 191, "usage_type": "call"}, {"api_name": "pyrus.core.teststatus.TestStatus.PASS", "line_number": 192, "usage_type": "attribute"}, {"api_name": "pyrus.core.teststatus.TestStatus", "line_number": 192, "usage_type": "name"}, {"api_name": "pyrus.core.action.AutomatedAction", "line_number": 198, "usage_type": "call"}, {"api_name": "pyrus.core.teststatus.TestStatus.XFAIL", "line_number": 199, "usage_type": "attribute"}, {"api_name": "pyrus.core.teststatus.TestStatus", "line_number": 199, "usage_type": "name"}]} +{"seq_id": "43541239162", "text": "from faker import Faker\nfrom settings.webdriver import Driver\nfrom page_objects.base_page import BasePage\nfrom locators.home_page_locators import HomePageLocators\nfrom locators.auth_page_locators import AuthLocators\n\n\"\"\"Test to check whether each item in dropdown list can be selected\"\"\"\n\n\nclass TestAuth():\n\n def setup(self):\n self.driver = Driver()\n self.bp = BasePage(self.driver)\n fake = Faker()\n self.driver.navigate('http://automationpractice.com/index.php')\n self.bp.click(HomePageLocators.SIGN_IN)\n self.bp.set(AuthLocators.EMAIL_CREATE, fake.email())\n self.bp.click(AuthLocators.SUBMIT_CREATE)\n\n def test_days_dropdown(self):\n self.bp.get_items_from_dropdown(AuthLocators.DAYS)\n\n for item in self.bp.items:\n self.bp.validate_item_is_selected(AuthLocators.DAYS, item)\n\n def test_month_dropdown(self):\n self.bp.get_items_from_dropdown(AuthLocators.MONTHS)\n\n for item in self.bp.items:\n self.bp.validate_item_is_selected(AuthLocators.MONTHS, item)\n\n # def test_year_dropdown(self):\n # self.bp.get_items_from_dropdown(AuthLocators.YEARS)\n #\n # for item in self.bp.items:\n # self.bp.validate_item_is_selected(AuthLocators.YEARS, item)\n\n def test_state_dropdown(self):\n self.bp.get_items_from_dropdown(AuthLocators.STATE)\n\n for item in self.bp.items:\n self.bp.validate_item_is_selected(AuthLocators.STATE, item)\n\n def teardown(self):\n self.driver.teardown()\n", "repo_name": "piotrkorzen/automation-sample", "sub_path": "tests/test_auth_page_dropdowns.py", "file_name": "test_auth_page_dropdowns.py", "file_ext": "py", "file_size_in_byte": 1538, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "settings.webdriver.Driver", "line_number": 13, "usage_type": "call"}, {"api_name": "page_objects.base_page.BasePage", "line_number": 14, "usage_type": "call"}, {"api_name": "faker.Faker", "line_number": 15, "usage_type": "call"}, {"api_name": "locators.home_page_locators.HomePageLocators.SIGN_IN", "line_number": 17, "usage_type": "attribute"}, {"api_name": "locators.home_page_locators.HomePageLocators", "line_number": 17, "usage_type": "name"}, {"api_name": "locators.auth_page_locators.AuthLocators.EMAIL_CREATE", "line_number": 18, "usage_type": "attribute"}, {"api_name": "locators.auth_page_locators.AuthLocators", "line_number": 18, "usage_type": "name"}, {"api_name": "locators.auth_page_locators.AuthLocators.SUBMIT_CREATE", "line_number": 19, "usage_type": "attribute"}, {"api_name": "locators.auth_page_locators.AuthLocators", "line_number": 19, "usage_type": 
"name"}, {"api_name": "locators.auth_page_locators.AuthLocators.DAYS", "line_number": 22, "usage_type": "attribute"}, {"api_name": "locators.auth_page_locators.AuthLocators", "line_number": 22, "usage_type": "name"}, {"api_name": "locators.auth_page_locators.AuthLocators.DAYS", "line_number": 25, "usage_type": "attribute"}, {"api_name": "locators.auth_page_locators.AuthLocators", "line_number": 25, "usage_type": "name"}, {"api_name": "locators.auth_page_locators.AuthLocators.MONTHS", "line_number": 28, "usage_type": "attribute"}, {"api_name": "locators.auth_page_locators.AuthLocators", "line_number": 28, "usage_type": "name"}, {"api_name": "locators.auth_page_locators.AuthLocators.MONTHS", "line_number": 31, "usage_type": "attribute"}, {"api_name": "locators.auth_page_locators.AuthLocators", "line_number": 31, "usage_type": "name"}, {"api_name": "locators.auth_page_locators.AuthLocators.STATE", "line_number": 40, "usage_type": "attribute"}, {"api_name": "locators.auth_page_locators.AuthLocators", "line_number": 40, "usage_type": "name"}, {"api_name": "locators.auth_page_locators.AuthLocators.STATE", "line_number": 43, "usage_type": "attribute"}, {"api_name": "locators.auth_page_locators.AuthLocators", "line_number": 43, "usage_type": "name"}]} +{"seq_id": "18782683684", "text": "''' This file serves a library. It contains all the basic functions to obtain an approximate version\n of the Lindblad master equation and compute the evolved quantum state after the interction with\n the environment. The approach used is based on the following two works:\n - Steinbach, J. and Garraway, B. M. and Knight, P. L., High-order unraveling of\n master equations for dissipative evolution, Phys. Rev. A, 51(4), 3302, 1995.\n - Yu Cao and Jianfeng Lu, Structure-preserving numerical schemes for Lindblad\n equations, arxiv 2103.01194, 2021.\n'''\n\nimport numpy as np\nimport qutip\nimport scipy.linalg\nfrom lindblad import *\n\ndef create_L_L_dagg(N,RM_D):\n ''' This function creates the Lindblad operators starting from a given Kossakowski matrix.\n \n Parameters\n ----------\n N : int\n Dimension of the system of interest\n RM_D : ndarray\n Kossakowsky matrix: positive matrix with unit trace.\n This matrix can be sampled from the Ginibre ensemble using the QuTip library as follows:\n RM_D = np.array(qutip.rand_dm_ginibre(:math:`(N^2-1)`, rank=None))\n \n Results\n -------\n L : ndarray\n Array of dimension :math:`(N^2 \\times N \\times N)` of :math:`(N^2)` Lindblad operators\n L_dagg : ndarray\n Conjugate transpose of the array L\n '''\n\n K = N*RM_D\n\n # Diagonalize the normalized Wishart matrix -> Kossakowski matrix\n eigval_K, eigvect_K = np.linalg.eigh(K)\n\n # Build Lindblad operators as an array of three indices: N*2 - 1 operators of dimension (N x N)\n F = F_matr_base_hs(N)\n L = np.zeros((N**2 -1,N,N), dtype=complex)\n L_dagg = np.zeros((N**2 -1,N,N), dtype=complex)\n\n for k in range(N**2 -1):\n l = np.zeros((N,N), dtype=complex)\n for m in range(N**2 -1):\n l = l + eigvect_K[m,k]*F[m+1] # You have to exclude the first element of F, Id(N).\n l = l*np.sqrt(eigval_K[k])\n L[k] = l\n L_dagg[k] = (np.conjugate(l)).T\n \n return L, L_dagg\n\ndef kraus(oper,state):\n ''' This function computes the Kraus superoperator associated with the input operator (oper) and\n gives as output the evolved input state.\n Parameters\n ----------\n oper : ndarray\n Input operator\n state : ndarray\n Input quantum state\n\n Results\n -------\n result : ndarray\n Evolved quantum state after the action of the input operator\n 
'''\n result = oper @ state @ (np.conjugate(oper)).T\n return result\n\ndef H_eff(H,lind):\n ''' This function computes the effective Hamiltonian as defined in\n \"Phys. Rev. A, 51(4), 3302, 1995\".\n \n Parameters\n ----------\n H : ndarray\n Hamiltonian of the system of interest\n lind : ndarray\n Lindblad operators associated to the Markovian noise\n\n Results\n -------\n result : ndarray\n Effective Hamiltonian\n '''\n num_lind = lind.shape[0]\n NN = lind.shape[1]\n sum_lind = np.zeros((NN,NN), dtype=complex)\n for k in range(num_lind):\n sum_lind += (np.conjugate(lind[k])).T @ lind[k]\n result = H + (1/2j)*sum_lind\n return result\n\ndef Lind_J(Heff,state):\n ''' This function computes the term L_J of the Lindbladian \n as reported in \"Phys. Rev. A, 51(4), 3302, 1995\".\n \n Parameters\n ---------\n Heff : ndarray\n Effective Hamiltonian of the system\n state : ndarray\n Input quantum state\n\n Results\n -------\n result : ndarray\n Output state after the action of the term L_J\n '''\n J = -1j*Heff\n result = J @ state + state @ (np.conjugate(J)).T\n return result\n\ndef Lind_L(lind,state):\n ''' This function computes the term L_L of the Lindbladian\n as reported in \"Phys. Rev. A, 51(4), 3302, 1995\".\n \n Parameters\n ----------\n lind : ndarray\n Lindblad operators associated to the Markovian noise\n state : ndarray\n Input quantum state\n\n Results\n -------\n result : ndarray\n Output state after the action of the term L_L\n \n '''\n num_lind = lind.shape[0]\n N = state.shape[0]\n result = np.zeros((N,N), dtype=complex)\n for k in range(num_lind):\n result += lind[k] @ state @ (np.conjugate(lind[k])).T\n return result\n\ndef neg_ent(state,N):\n ''' This function computes the negativity of entanglement of the input state.\n\n Parameters\n ----------\n N : int\n Dimension of the subsystem of the joint system of interest.\n state : ndarray\n Input state\n \n Returns\n -------\n result : float\n The negativity of entanglement.\n '''\n state_qutip = qutip.Qobj(state, dims = [[N,N],[N,N]], shape = (N**2,N**2))\n state_transpose_B = np.array(qutip.partial_transpose(state_qutip, [0,1]))\n state_trans_eigval = np.linalg.eigvalsh(state_transpose_B)\n neg = 0\n for i in range(N**2):\n neg = neg + np.absolute(state_trans_eigval[i]) - state_trans_eigval[i]\n return neg/2\n\ndef MP_IIord(H,lind,Dt,state):\n ''' This function computes the (approximated) quantum state which is the ouput of the \n approximated Lindblad master equation. 
This is a second order approximation\n from \"Yu Cao and Jianfeng Lu, Structure-preserving numerical schemes for Lindblad\n equations, arxiv 2103.01194, 2021.\"\n \n Parameters\n ----------\n H : ndarray\n Hamiltonian of the system of interest\n lind : ndarray\n Lindblad operators associated to the Markovian noise\n Dt : float\n Time step\n state : ndarray\n Input quantum state\n\n Returns\n -------\n result : ndarray\n Approximated state of the system of interest after the interaction with the\n (Markovian) environment\n\n '''\n\n d = state.shape[0]\n Heff = H_eff(H,lind)\n kraus_I_arg = np.eye(d) + (-1j*Dt)*Heff + ((-1j*Dt)**2)*(Heff@Heff)/2\n I_state = kraus(kraus_I_arg,state)\n\n kraus_II_arg = np.eye(d) + (-1j*Dt/2)*Heff\n state_after_kraus = kraus(kraus_II_arg,state)\n state_after_lindL = Lind_L(lind,state_after_kraus)\n state_after_second_kraus = kraus(kraus_II_arg,state_after_lindL)\n II_state = Dt*state_after_second_kraus\n\n I_LindL = Lind_L(lind,state)\n II_LindL = Lind_L(lind,I_LindL)\n III_state = ((Dt**2)/2)*II_LindL\n\n result = I_state + II_state + III_state\n \n return result\n\ndef unraveling_LME_II(state,H,lind,lind_dagg,dt,N):\n ''' This function computes the (approximated) quantum state which is the ouput of an \n approximated Lindblad master equation. The output state is be approximated with an error \n of :math:`O(dt^3)`, following the approximation scheme reported in \n \"Phys Rev A, 51(4), 3302, 1995\", Eq. (7).\n\n Parameters\n ----------\n state : nddarray\n Input quantum state\n H : ndarray\n Hamiltonian of the system\n lind : ndarray\n Lindblad operators acting on the system\n lind_dagg : ndarray\n Dagger of the Lindblad operators acting on the system\n dt : float\n Time step\n N : int\n Dimension of one subsystem of the system of interest\n\n Returns\n -------\n state : ndarray\n Approximated state of the system of interest after the interaction with the\n (Markovian) environment\n '''\n \n d = state.shape[0]\n Heff = (-1j*dt)*(H_eff(H,lind))\n\n U = scipy.linalg.expm(Heff)\n U_dagg = (np.conjugate(U)).T\n\n I_term = U @ state @ U_dagg\n\n II_term = np.zeros((d,d), dtype=complex)\n III_term = np.zeros((d,d), dtype=complex)\n IV_term = np.zeros((d,d), dtype=complex)\n \n for j in range(N**2-1):\n II_term = II_term + (0.5 * dt)* U @ lind[j] @ state @ lind_dagg[j] @ U_dagg\n III_term = III_term + (0.5 * dt) * lind[j] @ U @ state @ U_dagg @ lind_dagg[j]\n\n for i in range(N**2-1):\n for j in range(N**2-1):\n IV_term = IV_term + (0.5 * dt * dt) * U @ lind[i] @ lind[j] @ state @ lind_dagg[j] @ lind_dagg[i] @ U_dagg\n\n state = I_term + II_term + III_term + IV_term\n state_trace = np.trace(state)\n\n return state/state_trace\n\n\n", "repo_name": "nunziacerrato/ALME", "sub_path": "ALME.py", "file_name": "ALME.py", "file_ext": "py", "file_size_in_byte": 8301, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "numpy.linalg.eigh", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.conjugate", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.conjugate", "line_number": 70, "usage_type": "call"}, {"api_name": 
"numpy.zeros", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.conjugate", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.conjugate", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.conjugate", "line_number": 138, "usage_type": "call"}, {"api_name": "qutip.Qobj", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 157, "usage_type": "call"}, {"api_name": "qutip.partial_transpose", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.linalg.eigvalsh", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 158, "usage_type": "attribute"}, {"api_name": "numpy.absolute", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 194, "usage_type": "call"}, {"api_name": "scipy.linalg.linalg.expm", "line_number": 239, "usage_type": "call"}, {"api_name": "scipy.linalg.linalg", "line_number": 239, "usage_type": "attribute"}, {"api_name": "scipy.linalg", "line_number": 239, "usage_type": "name"}, {"api_name": "numpy.conjugate", "line_number": 240, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 244, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 246, "usage_type": "call"}, {"api_name": "numpy.trace", "line_number": 257, "usage_type": "call"}]} +{"seq_id": "6142497342", "text": "#!/usr/bin/env python\r\n# encoding: utf-8\r\n\r\n\r\nimport pandas as pd\r\nfrom joblib import Parallel, delayed\r\nimport multiprocessing as mp\r\nfrom itertools import combinations\r\nimport math\r\nimport gc\r\nimport numpy as np\r\nfrom copy import deepcopy\r\nimport time\r\n\r\n\r\nclass protect_comloc:\r\n\r\n def __init__(self, user_num, tablename):\r\n # self.ano_checkins = []\r\n self.user_num = user_num\r\n self.ano_checkins_tablename = tablename\r\n pass\r\n\r\n def k_freq_loc(self, u, u_checkins, k):\r\n u_loc = pd.DataFrame(u_checkins['locid'].value_counts()).reset_index() # 统计locid的不同值及其个数\r\n u_loc.columns = [\"locid\", \"cnt\"]\r\n if k <= len(u_loc):\r\n u_loc = u_loc[0:k]\r\n return [u, list(u_loc.locid.values)]\r\n\r\n def u_comloc(self, u1_checkins, u2_checkins, u1, u2):\r\n u1_locids = u1_checkins.locid.unique()\r\n u2_locids = u2_checkins.locid.unique()\r\n comloc = list(set(u1_locids).intersection(set(u2_locids)))\r\n return [u1, u2, len(comloc), comloc]\r\n\r\n def comloc_protection(self, checkins, k, m): # k为用户的前k个频繁访问位置、m为前m%的共同访问位置,checkins是签到数据\r\n uids = checkins.uid.unique()\r\n checkin1 = checkins.groupby(by=['locid', 'latitude', 'longitude']).size().reset_index(name=\"locid_time\")\r\n locids = [row[0] for row in checkin1.itertuples(index=False, name=False)] # 记录locid对应的经纬度,以便在替换locid时将相应的位置数据也进行替换\r\n lat_lon = [[row[1], row[2]] for row in checkin1.itertuples(index=False, name=False)]\r\n del checkin1 # 释放checkin1的内存空间\r\n gc.collect()\r\n core_num = mp.cpu_count()\r\n # print(checkins)\r\n # print(uids)\r\n checkins = checkins.sort_values(by=['uid'], ascending=True).reset_index(drop=True) # uid的降序排列\r\n user_k_freqloc = Parallel(n_jobs=core_num)(delayed(self.k_freq_loc)(u, checkins[checkins.uid == u], k) for u in uids)\r\n user_k_freqloc = pd.DataFrame(user_k_freqloc, columns=['uid', 'k_freqlocs'])\r\n # print(user_k_freqloc)\r\n pairs = pd.DataFrame(list(combinations(uids, 2)), columns=['u1', 'u2'])\r\n 
pairs_comloc = Parallel(n_jobs=core_num)(delayed(self.u_comloc)(checkins[checkins.uid == row[0]], checkins[checkins.uid == row[1]], row[0], row[1]) for row in pairs.itertuples(index=False, name=False))\r\n        pairs_comloc = pd.DataFrame(pairs_comloc, columns=[\"u1\", \"u2\", \"comloc_num\", \"comlocs\"])\r\n        nums = int(math.ceil(len(pairs_comloc) * m))\r\n        pairs_comloc = pairs_comloc.sort_values(by='comloc_num', ascending=False).reset_index(drop=True)\r\n        pairs_comloc = pairs_comloc[0:nums]  # the top m% of user pairs ranked by common locations, i.e. the pairs whose common locations must be preserved\r\n        # print(pairs_comloc)\r\n        protect_uids = set(pairs_comloc.u1.values).union(set(pairs_comloc.u2.values))  # ids of users whose common locations need protection\r\n        disturb_uid = set(uids) - protect_uids  # users whose common locations do not need protection\r\n        # print(disturb_uid)\r\n        ano_checkins = []\r\n        for u in disturb_uid:  # for users with no common locations to protect, randomly perturb their check-ins\r\n            u_checkins = deepcopy(checkins[checkins.uid == u])\r\n            u_locids = u_checkins.locid.unique()\r\n            for row in u_checkins.itertuples(index=False, name=False):\r\n                # locid = np.random.choice(locids)\r\n                locid = np.random.choice(u_locids)\r\n                u_lat_lon = lat_lon[locids.index(locid)]\r\n                lat = u_lat_lon[0]\r\n                lng = u_lat_lon[1]\r\n                # ano_checkin = [row[0], row[1], lat, lng, locid, row[5]]\r\n                ano_checkin = [row[0], row[1], lat, lng, locid]\r\n                ano_checkins.append(ano_checkin)\r\n        for u in protect_uids:  # for users whose common locations need protection, perturb every location except the non-frequent ones among the common locations\r\n            u_checkins = deepcopy(checkins[checkins.uid == u])\r\n            u_locids = u_checkins.locid.unique()\r\n            comlocs = pairs_comloc[(pairs_comloc.u1 == u) | (pairs_comloc.u2 == u)].comlocs\r\n            u_comlocs = []\r\n            list(map(lambda x: u_comlocs.extend(x), comlocs))\r\n            u_comlocs = set(u_comlocs)\r\n            u_freqloc = user_k_freqloc[user_k_freqloc.uid == u].k_freqlocs.values[0]\r\n            u_protect_locs = set(u_comlocs) - set(u_freqloc)  # set difference of common locations and frequent locations\r\n            # if (len(set(u_comlocs) - set(locids)) == 0) & (len(set(u_comlocs)) == len(set(locids))):\r\n            #     u_distrublocs = list(set(locids))\r\n            if len(set.union(set(u_comlocs), set(u_freqloc))) == len(u_locids):\r\n                u_distrublocs = list(set(u_locids))\r\n            # if (len(set(u_comlocs) - set(u_locids)) == 0) & (len(set(u_comlocs)) == len(set(u_locids))):\r\n            #     u_distrublocs = list(set(u_locids))\r\n            elif len(set(u_comlocs).intersection(set(u_freqloc))) == 0:\r\n                u_distrublocs = list(set(u_locids))\r\n            else:\r\n                u_distrublocs = list(set(u_locids) - u_protect_locs)  # locids available for substitution\r\n            for row in u_checkins.itertuples(index=False, name=False):\r\n                if row[4] not in u_protect_locs:\r\n                    locid = np.random.choice(u_distrublocs)\r\n                    u_lat_lon = lat_lon[locids.index(locid)]\r\n                    lat = u_lat_lon[0]\r\n                    lng = u_lat_lon[1]\r\n                    # ano_checkin = [row[0], row[1], lat, lng, locid, row[5]]\r\n                    ano_checkin = [row[0], row[1], lat, lng, locid]\r\n                    ano_checkins.append(ano_checkin)\r\n                else:\r\n                    ano_checkins.append(list(row))\r\n        self.save_ano_checkins(ano_checkins)\r\n        # return ano_checkins  # the perturbed anonymized data\r\n\r\n    def save_ano_checkins(self, ano_checkin):\r\n        ano_checkins = pd.DataFrame(ano_checkin)\r\n        ano_checkins.to_csv(\"G:/pyfile/relation_protect/src/data/result_data/\" + self.ano_checkins_tablename + \"/3_\" +\r\n                            str(self.user_num) + \"_\" + self.ano_checkins_tablename + \".csv\", header=None, index=None, sep='\\t', mode='a')\r\n\r\n    def comnunity_disturb(self, checkins, k, m):\r\n        community_checkins = checkins.groupby([\"clusterid\"])\r\n        print(len(community_checkins))\r\n        checkin_cnt = 0\r\n        checkin_chunk_size = math.ceil(len(community_checkins) / 10)\r\n        for group in community_checkins:\r\n            if checkin_cnt % checkin_chunk_size == 0:  # finished the 
anonymization of a chunk of checkins; print a snapshot of the anonymization progress\r\n                print('%-3d%% work complete.' % (int(checkin_cnt / checkin_chunk_size) * 10))\r\n            self.comloc_protection(group[1], k, m)\r\n            checkin_cnt += 1\r\n        # sort the data by uid in ascending order\r\n        ano_checkins = pd.read_csv(\"G:/pyfile/relation_protect/src/data/result_data/\" + self.ano_checkins_tablename + \"/3_\" +\r\n                                   str(self.user_num) + \"_\" + self.ano_checkins_tablename + \".csv\", index_col=None,\r\n                                   sep='\\t', names=['uid', 'times', 'latitude', 'longitude', 'locid', 'clusterid'])\r\n        ano_checkins = ano_checkins.sort_values(by=['uid']).reset_index(drop=True)\r\n        ano_checkins.to_csv(\"G:/pyfile/relation_protect/src/data/result_data/\" + self.ano_checkins_tablename + \"/3_\" +\r\n                            str(self.user_num) + \"_\" + self.ano_checkins_tablename + \".csv\", header=None, index=None, sep='\\t')\r\n\r\nif __name__ == \"__main__\":\r\n\r\n    # for k in [3, 4, 5, 6, 7, 8, 9, 10]:\r\n    for k in [3]:\r\n        start = time.time()\r\n        pc = protect_comloc(int(k), \"comloc\")\r\n        checkins = pd.read_csv(\"G:/pyfile/relation_protect/src/data/city_data/test.csv\", delimiter=\"\\t\", index_col=None)\r\n        # checkins = pd.read_csv(\r\n        #     \"G:/pyfile/relation_protect/src/data/result_data/1_comloc_\" + str(k) + \"_user_simple_community.data\",\r\n        #     delimiter=\"\\t\", names=[\"uid\", \"time\", \"latitude\", \"longitude\", \"locid\", \"clusterid\"], header=None)\r\n        # pc.comnunity_disturb(checkins, 3, 0.6)\r\n\r\n        pc.comloc_protection(checkins, 3, 0.2)\r\n        end = time.time()\r\n        print(\"Elapsed time:\", str(end-start))\r\n        # data = pd.DataFrame(data, columns=[\"uid\", \"time\", \"lat\", \"lng\", \"locid\"])\r\n        # print(data)\r\n        # data.to_csv(\"G:/pyfile/relation_protect/src/data/city_data/test1.csv\", sep='\\t', index=False, header=False)\r\n", "repo_name": "gjmgjm/gjm_community_divide", "sub_path": "utils/protect_comloc.py", "file_name": "protect_comloc.py", "file_ext": "py", "file_size_in_byte": 8679, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "pandas.DataFrame", "line_number": 25, "usage_type": "call"}, {"api_name": "gc.collect", "line_number": 43, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 44, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 48, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 48, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 49, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 51, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 51, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 52, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 52, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 53, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 54, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 67, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 95, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 108, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 116, "usage_type": "call"}, {"api_name": 
"pandas.read_csv", "line_number": 123, "usage_type": "call"}, {"api_name": "time.time", "line_number": 134, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 136, "usage_type": "call"}, {"api_name": "time.time", "line_number": 143, "usage_type": "call"}]} +{"seq_id": "30893782086", "text": "import os\nimport logging\n\nfrom flask import Flask, request, redirect, session, jsonify, url_for, render_template\nfrom flask_login import login_user, LoginManager, UserMixin, logout_user, current_user\nimport dash\nfrom dash import html\nimport dash_bootstrap_components as dbc\nfrom dash_bootstrap_templates import load_figure_template\n\nfrom ..garjus import Garjus\nfrom .pages import qa\nfrom .pages import activity\nfrom .pages import issues\nfrom .pages import queue\nfrom .pages import stats\nfrom .pages import analyses\nfrom .pages import processors\nfrom .pages import reports\n\n\n# This file serves the same purpose as index.py but wrapped in a flask app\n# with user/password authentication. garjus will return this app when\n# login option is requested.\n\nlogger = logging.getLogger('garjus.dashboard.login')\n\n# Connect to an underlying flask server so we can configure it for auth\ntemplates = os.path.expanduser('~/git/garjus/garjus/dashboard/templates')\nserver = Flask(__name__, template_folder=templates)\n\n\n@server.before_request\ndef check_login():\n # TODO: use dash pages module to return pages based on user access level\n if request.method == 'GET':\n if request.path in ['/login', '/logout']:\n # nothing to check here\n return\n if is_authenticated():\n logger.debug(f'user is authenticated:{current_user.id}')\n return\n\n # nothing to check so user must log in\n return redirect(url_for('login'))\n else:\n if current_user:\n if request.path == '/login' or is_authenticated():\n return\n\n logout_user()\n return\n\n\ndef is_authenticated():\n return current_user and current_user.is_authenticated and Garjus.is_authenticated()\n\n\n@server.route('/login', methods=['POST', 'GET'])\ndef login(message=\"\"):\n if request.method == 'POST':\n if request.form:\n hostname = 'https://xnat.vanderbilt.edu/xnat'\n username = request.form['username']\n password = request.form['password']\n\n try:\n # Get the xnat alias token\n from ..garjus import Garjus\n Garjus.login(hostname, username, password)\n\n login_user(User(username, hostname))\n\n # What page do we send?\n if session.get('url', False):\n # redirect to original target\n url = session['url']\n logger.debug(f'redirecting to target url:{url}')\n session['url'] = None\n return redirect(url)\n else:\n # redirect to home\n return redirect('/')\n except Exception as err:\n logger.debug(f'login failed:{err}')\n message = 'login failed, try again'\n else:\n if current_user:\n if current_user.is_authenticated:\n try:\n return redirect('/')\n except Exception as err:\n logger.debug(f'cannot log in, try again:{err}')\n message = 'login failed, try again'\n\n return render_template('login.html', message=message)\n\n\n@server.route('/logout', methods=['GET'])\ndef logout():\n if current_user:\n if current_user.is_authenticated:\n logout_user()\n return render_template('login.html', message=\"you have been logged out\")\n\n# Prep the configs for the app\ndbc_css = \"https://cdn.jsdelivr.net/gh/AnnMarieW/dash-bootstrap-templates/dbc.min.css\"\nassets_path = os.path.expanduser('~/git/garjus/garjus/dashboard/assets')\ndarkmode = True\n\nif darkmode:\n stylesheets = [dbc.themes.DARKLY, dbc_css]\n load_figure_template(\"darkly\")\nelse:\n 
stylesheets = [dbc.themes.FLATLY, dbc_css]\n load_figure_template(\"flatly\")\n\n# Build the dash app with the configs\napp = dash.Dash(\n __name__,\n server=server,\n external_stylesheets=stylesheets,\n assets_folder=assets_path,\n suppress_callback_exceptions=True,\n)\n\n# Set the title to appear on web pages\napp.title = 'dashboard'\n\nserver.config.update(SECRET_KEY=os.urandom(24))\n\n# Login manager object will be used to login / logout users\nlogin_manager = LoginManager()\nlogin_manager.init_app(server)\nlogin_manager.login_view = \"/login\"\n\n\nclass User(UserMixin):\n # User data model. It has to have at least self.id as a minimum\n def __init__(self, username, hostname=None):\n self.id = username\n self.hostname = hostname\n\n\n@login_manager.user_loader\ndef load_user(username):\n \"\"\"This function loads the user by user id.\"\"\"\n return User(username)\n\n\nfooter_content = [\n html.Hr(),\n html.Div(\n [\n dbc.Row([\n dbc.Col(\n html.A(\n \"garjus\",\n href='https://github.com/ccmvumc/garjus',\n target=\"_blank\",\n ),\n ),\n dbc.Col(\n html.A('xnat', href='https://xnat.vanderbilt.edu/xnat'),\n ),\n dbc.Col(\n html.A('logout', href='../logout'),\n ),\n ]),\n ],\n style={'textAlign': 'center'},\n ),\n]\n\nif Garjus.redcap_found():\n tabs = dbc.Tabs([\n dbc.Tab(\n label='QA',\n tab_id='tab-qa',\n children=qa.get_content(),\n ),\n dbc.Tab(\n label='Issues',\n tab_id='tab-issues',\n children=issues.get_content(),\n ),\n dbc.Tab(\n label='Queue',\n tab_id='tab-queue',\n children=queue.get_content(),\n ),\n dbc.Tab(\n label='Activity',\n tab_id='tab-activity',\n children=activity.get_content(),\n ),\n dbc.Tab(\n label='Stats',\n tab_id='tab-stats',\n children=stats.get_content(),\n ),\n dbc.Tab(\n label='Processors',\n tab_id='tab-processors',\n children=processors.get_content(),\n ),\n dbc.Tab(\n label='Reports',\n tab_id='tab-reports',\n children=reports.get_content(),\n ),\n dbc.Tab(\n label='Analyses',\n tab_id='tab-analyses',\n children=analyses.get_content(),\n ),\n ])\nelse:\n tabs = html.Div(qa.get_content())\n\n# Wrap in a Div with margins\napp.layout = html.Div(\n className='dbc',\n style={'marginLeft': '20px', 'marginRight': '20px'},\n children=[\n html.Div(id='report-content', children=[tabs]),\n html.Div(id='footer-content', children=footer_content)\n ])\n\nif __name__ == \"__main__\":\n app.run_server(debug=True)\n", "repo_name": "ccmvumc/garjus", "sub_path": "garjus/dashboard/login.py", "file_name": "login.py", "file_ext": "py", "file_size_in_byte": 6715, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "logging.getLogger", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.Flask", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 36, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 36, "usage_type": "name"}, {"api_name": "flask.request.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 37, "usage_type": "name"}, {"api_name": "flask_login.current_user.id", "line_number": 41, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 41, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 45, 
"usage_type": "call"}, {"api_name": "flask_login.current_user", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.request.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 48, "usage_type": "name"}, {"api_name": "flask_login.logout_user", "line_number": 51, "usage_type": "call"}, {"api_name": "flask_login.current_user", "line_number": 56, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 56, "usage_type": "attribute"}, {"api_name": "garjus.Garjus.is_authenticated", "line_number": 56, "usage_type": "call"}, {"api_name": "garjus.Garjus", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 61, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 61, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 62, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 62, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 64, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 64, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 65, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 65, "usage_type": "name"}, {"api_name": "garjus.Garjus.login", "line_number": 70, "usage_type": "call"}, {"api_name": "garjus.Garjus", "line_number": 70, "usage_type": "name"}, {"api_name": "flask_login.login_user", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.session.get", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 75, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 77, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 79, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 80, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 83, "usage_type": "call"}, {"api_name": "flask_login.current_user", "line_number": 88, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 89, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 89, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 91, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 96, "usage_type": "call"}, {"api_name": "flask_login.current_user", "line_number": 101, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 102, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 102, "usage_type": "name"}, {"api_name": "flask_login.logout_user", "line_number": 103, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path", "line_number": 108, "usage_type": "attribute"}, {"api_name": "dash_bootstrap_components.themes", "line_number": 112, "usage_type": "attribute"}, {"api_name": "dash_bootstrap_templates.load_figure_template", "line_number": 113, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.themes", "line_number": 115, "usage_type": "attribute"}, {"api_name": "dash_bootstrap_templates.load_figure_template", "line_number": 116, "usage_type": "call"}, {"api_name": "dash.Dash", "line_number": 119, "usage_type": "call"}, {"api_name": "os.urandom", "line_number": 130, "usage_type": "call"}, {"api_name": 
"flask_login.LoginManager", "line_number": 133, "usage_type": "call"}, {"api_name": "flask_login.UserMixin", "line_number": 138, "usage_type": "name"}, {"api_name": "dash.html.Hr", "line_number": 152, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 152, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 153, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 153, "usage_type": "name"}, {"api_name": "dash_bootstrap_components.Row", "line_number": 155, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 156, "usage_type": "call"}, {"api_name": "dash.html.A", "line_number": 157, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 157, "usage_type": "name"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 163, "usage_type": "call"}, {"api_name": "dash.html.A", "line_number": 164, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 164, "usage_type": "name"}, {"api_name": "dash_bootstrap_components.Col", "line_number": 166, "usage_type": "call"}, {"api_name": "dash.html.A", "line_number": 167, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 167, "usage_type": "name"}, {"api_name": "garjus.Garjus.redcap_found", "line_number": 175, "usage_type": "call"}, {"api_name": "garjus.Garjus", "line_number": 175, "usage_type": "name"}, {"api_name": "dash_bootstrap_components.Tabs", "line_number": 176, "usage_type": "call"}, {"api_name": "dash_bootstrap_components.Tab", "line_number": 177, "usage_type": "call"}, {"api_name": "pages.qa.get_content", "line_number": 180, "usage_type": "call"}, {"api_name": "pages.qa", "line_number": 180, "usage_type": "name"}, {"api_name": "dash_bootstrap_components.Tab", "line_number": 182, "usage_type": "call"}, {"api_name": "pages.issues.get_content", "line_number": 185, "usage_type": "call"}, {"api_name": "pages.issues", "line_number": 185, "usage_type": "name"}, {"api_name": "dash_bootstrap_components.Tab", "line_number": 187, "usage_type": "call"}, {"api_name": "pages.queue.get_content", "line_number": 190, "usage_type": "call"}, {"api_name": "pages.queue", "line_number": 190, "usage_type": "name"}, {"api_name": "dash_bootstrap_components.Tab", "line_number": 192, "usage_type": "call"}, {"api_name": "pages.activity.get_content", "line_number": 195, "usage_type": "call"}, {"api_name": "pages.activity", "line_number": 195, "usage_type": "name"}, {"api_name": "dash_bootstrap_components.Tab", "line_number": 197, "usage_type": "call"}, {"api_name": "pages.stats.get_content", "line_number": 200, "usage_type": "call"}, {"api_name": "pages.stats", "line_number": 200, "usage_type": "name"}, {"api_name": "dash_bootstrap_components.Tab", "line_number": 202, "usage_type": "call"}, {"api_name": "pages.processors.get_content", "line_number": 205, "usage_type": "call"}, {"api_name": "pages.processors", "line_number": 205, "usage_type": "name"}, {"api_name": "dash_bootstrap_components.Tab", "line_number": 207, "usage_type": "call"}, {"api_name": "pages.reports.get_content", "line_number": 210, "usage_type": "call"}, {"api_name": "pages.reports", "line_number": 210, "usage_type": "name"}, {"api_name": "dash_bootstrap_components.Tab", "line_number": 212, "usage_type": "call"}, {"api_name": "pages.analyses.get_content", "line_number": 215, "usage_type": "call"}, {"api_name": "pages.analyses", "line_number": 215, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 219, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 219, 
"usage_type": "name"}, {"api_name": "pages.qa.get_content", "line_number": 219, "usage_type": "call"}, {"api_name": "pages.qa", "line_number": 219, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 222, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 222, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 226, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 226, "usage_type": "name"}, {"api_name": "dash.html.Div", "line_number": 227, "usage_type": "call"}, {"api_name": "dash.html", "line_number": 227, "usage_type": "name"}]} +{"seq_id": "70228563963", "text": "from detectron2.structures import BoxMode\nfrom pathlib import Path\nimport json\nimport cv2\n\nSAL_THR = 0.5\n\n\ndef get_assr_dicts(root, mode):\n root = Path(root)\n json_file = root / f\"obj_seg_data_{mode}.json\"\n list_file = root / f\"{mode}_images.txt\"\n with open(json_file) as f:\n imgs_anns = json.load(f)\n\n dataset_dicts = []\n for idx, anno in enumerate(imgs_anns):\n record = {}\n\n filename = str(root / 'images' / mode / (anno['img'] + '.jpg'))\n height, width = cv2.imread(filename).shape[:2]\n\n record[\"file_name\"] = filename\n record[\"image_id\"] = idx\n record[\"height\"] = height\n record[\"width\"] = width\n\n with open(root / 'rank_order' / mode / (anno['img'] + '.json')) as f:\n ranker_order = json.load(f)['rank_order']\n\n objs = []\n assert len(ranker_order) == len(\n anno[\"object_data\"]), \"Every box should correspond a rank order\"\n\n for rank, obj_anno in zip(ranker_order, anno[\"object_data\"]):\n # 这里要过滤一下rank <= 0.5 的 box\n if rank > SAL_THR:\n obj = {\n \"bbox\": obj_anno['bbox'],\n \"bbox_mode\": BoxMode.XYXY_ABS,\n \"segmentation\": obj_anno['segmentation'],\n \"category_id\": 0,\n \"gt_rank\": int(rank * 10 - 6) # map 0.5~1.0 to 0,1,2,3,4\n }\n objs.append(obj)\n record[\"annotations\"] = objs\n dataset_dicts.append(record)\n return dataset_dicts\n", "repo_name": "EricFH/SOR", "sub_path": "sor_ppa/sor/assr_register.py", "file_name": "assr_register.py", "file_ext": "py", "file_size_in_byte": 1550, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 23, "dataset": "github-code", "pt": "41", "api": [{"api_name": "pathlib.Path", "line_number": 10, "usage_type": "call"}, {"api_name": "json.load", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 21, "usage_type": "call"}, {"api_name": "json.load", "line_number": 29, "usage_type": "call"}, {"api_name": "detectron2.structures.BoxMode.XYXY_ABS", "line_number": 40, "usage_type": "attribute"}, {"api_name": "detectron2.structures.BoxMode", "line_number": 40, "usage_type": "name"}]} +{"seq_id": "40458265217", "text": "import discord\nfrom discord.ext import commands\n\n\nclass EmbedPaginator(commands.Paginator):\n\n def __init__(self, embed=None):\n\n self._embed = embed or discord.Embed()\n\n self.clear()\n self._count = 0\n\n def new_page(self):\n self._current_page = self._embed.copy()\n self._current_page.description = ''\n\n def clear(self):\n self._pages = []\n self.new_page()\n\n def add_line(self, line='', *, empty=False):\n max_description_size = 2048\n if len(line) > max_description_size:\n raise RuntimeError('Line exceeds maximum size')\n\n # Close page if too large to add\n if len(self._current_page.description) + len(line) + 1 > max_description_size:\n self.close_page()\n\n max_embed_size = 5500\n if len(self._current_page) + len(line) + 1 > max_embed_size:\n self.close_page()\n\n self._current_page.description += '\\n' + line 
+ ('\\n' if empty else '')\n\n def add_field(self, name, value, *, inline=False):\n\n max_field_name_size = 256\n if len(name) > max_field_name_size:\n raise RuntimeError('Field name exceeds maximum size')\n\n max_field_value_size = 1024\n if len(value) > max_field_value_size:\n raise RuntimeError('Field value exceeds maximum size')\n\n max_fields = 25\n if len(self._current_page.fields) == max_fields:\n self.close_page()\n\n max_embed_size = 5500\n if len(self._current_page) + len(name) + len(value) > max_embed_size:\n self.close_page()\n\n self._current_page.add_field(name=name, value=value, inline=inline)\n\n def close_page(self):\n\n # Add cont if required.\n if len(self._pages) >= 1:\n if self._current_page.author.name:\n self._current_page.set_author(\n name=self._current_page.author.name + ' Cont.',\n url=self._current_page.author.url,\n icon_url=self._current_page.author.icon_url\n )\n\n self._pages.append(self._current_page)\n self.new_page()\n\n def __repr__(self):\n fmt = ''\n return fmt.format(self)\n", "repo_name": "MrFr0asty/Ditto", "sub_path": "bot/utils/paginator.py", "file_name": "paginator.py", "file_ext": "py", "file_size_in_byte": 2206, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "discord.ext.commands.Paginator", "line_number": 5, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 5, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "34328312047", "text": "import abc\n\nimport requests as rq\n\nfrom . import util\n\n\nclass Endpoint:\n def __init__(self, url):\n self.url = url\n\n\nclass Wikidata(Endpoint):\n def __init__(self):\n super().__init__(\"https://query.wikidata.org/sparql\")\n\n def query(self, query):\n response = rq.post(\n self.url,\n params={\"format\": \"json\"},\n headers={\n \"Content-Type\": \"application/sparql-query\",\n \"Accept\": \"application/json\",\n },\n data=query,\n )\n\n return WikidataResult(response.json())\n\n\nclass WikidataResult:\n def __init__(self, data):\n self.data = data\n\n @util.cached_property\n def bindings(self):\n return self.data[\"results\"][\"bindings\"]\n", "repo_name": "emilbaekdahl/masters-code", "sub_path": "kgdata/sparql.py", "file_name": "sparql.py", "file_ext": "py", "file_size_in_byte": 759, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "33", "api": [{"api_name": "requests.post", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "11042803438", "text": "import esphome.codegen as cg\nimport esphome.config_validation as cv\nfrom esphome.components import ble_client, light\nfrom esphome.const import (\n CONF_ID,\n CONF_OUTPUT_ID,\n)\n\n\nCODEOWNERS = [\"@hcoohb\"]\nDEPENDENCIES = [\"light\"]\n\nyeelight_bt_ns = cg.esphome_ns.namespace(\"yeelight_bt\")\nYeelight_bt = yeelight_bt_ns.class_(\"Yeelight_bt\", ble_client.BLEClientNode, cg.Component, light.LightOutput)\n\nCONFIG_SCHEMA = (\n # cv.Schema(\n # {\n # cv.GenerateID(): cv.declare_id(Yeelight_bt),\n # }\n # )\n light.BRIGHTNESS_ONLY_LIGHT_SCHEMA.extend({\n cv.GenerateID(CONF_OUTPUT_ID): cv.declare_id(Yeelight_bt),\n })\n .extend(ble_client.BLE_CLIENT_SCHEMA)\n .extend(cv.COMPONENT_SCHEMA)\n # .extend(light.BRIGHTNESS_ONLY_LIGHT_SCHEMA)\n)\n\n\ndef to_code(config):\n var = cg.new_Pvariable(config[CONF_OUTPUT_ID])\n yield cg.register_component(var, config)\n yield ble_client.register_ble_node(var, config)\n yield 
light.register_light(var, config)\n", "repo_name": "hcoohb/esphome-components", "sub_path": "components/yeelight_bt/light.py", "file_name": "light.py", "file_ext": "py", "file_size_in_byte": 990, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "esphome.codegen.esphome_ns.namespace", "line_number": 13, "usage_type": "call"}, {"api_name": "esphome.codegen.esphome_ns", "line_number": 13, "usage_type": "attribute"}, {"api_name": "esphome.codegen", "line_number": 13, "usage_type": "name"}, {"api_name": "esphome.components.ble_client.BLEClientNode", "line_number": 14, "usage_type": "attribute"}, {"api_name": "esphome.components.ble_client", "line_number": 14, "usage_type": "name"}, {"api_name": "esphome.codegen.Component", "line_number": 14, "usage_type": "attribute"}, {"api_name": "esphome.codegen", "line_number": 14, "usage_type": "name"}, {"api_name": "esphome.components.light.LightOutput", "line_number": 14, "usage_type": "attribute"}, {"api_name": "esphome.components.light", "line_number": 14, "usage_type": "name"}, {"api_name": "esphome.components.light.BRIGHTNESS_ONLY_LIGHT_SCHEMA.extend", "line_number": 22, "usage_type": "call"}, {"api_name": "esphome.components.light.BRIGHTNESS_ONLY_LIGHT_SCHEMA", "line_number": 22, "usage_type": "attribute"}, {"api_name": "esphome.components.light", "line_number": 22, "usage_type": "name"}, {"api_name": "esphome.config_validation.GenerateID", "line_number": 23, "usage_type": "call"}, {"api_name": "esphome.const.CONF_OUTPUT_ID", "line_number": 23, "usage_type": "argument"}, {"api_name": "esphome.config_validation", "line_number": 23, "usage_type": "name"}, {"api_name": "esphome.config_validation.declare_id", "line_number": 23, "usage_type": "call"}, {"api_name": "esphome.components.ble_client.BLE_CLIENT_SCHEMA", "line_number": 25, "usage_type": "attribute"}, {"api_name": "esphome.components.ble_client", "line_number": 25, "usage_type": "name"}, {"api_name": "esphome.config_validation.COMPONENT_SCHEMA", "line_number": 26, "usage_type": "attribute"}, {"api_name": "esphome.config_validation", "line_number": 26, "usage_type": "name"}, {"api_name": "esphome.codegen.new_Pvariable", "line_number": 32, "usage_type": "call"}, {"api_name": "esphome.codegen", "line_number": 32, "usage_type": "name"}, {"api_name": "esphome.const.CONF_OUTPUT_ID", "line_number": 32, "usage_type": "name"}, {"api_name": "esphome.codegen.register_component", "line_number": 33, "usage_type": "call"}, {"api_name": "esphome.codegen", "line_number": 33, "usage_type": "name"}, {"api_name": "esphome.components.ble_client.register_ble_node", "line_number": 34, "usage_type": "call"}, {"api_name": "esphome.components.ble_client", "line_number": 34, "usage_type": "name"}, {"api_name": "esphome.components.light.register_light", "line_number": 35, "usage_type": "call"}, {"api_name": "esphome.components.light", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "40940041525", "text": "import discord\nimport asyncio\nimport json\nimport aiohttp\n\n\nfrom requests_html import AsyncHTMLSession\nfrom discord.ext import commands\n\n\n\nclass MemesBot(commands.Cog):\n\n mem_text_channel = None\n\n def __init__(self, bot):\n self.bot = bot\n self.url = 'https://admem.ru/rndm'\n self.atkritka_url = 'http://atkritka.com/random_ok/'\n self.joke_url = 'http://rzhunemogu.ru/RandJSON.aspx?CType=1'\n self.asession = AsyncHTMLSession()\n\n @commands.Cog.listener()\n async def on_ready(self):\n \n channels = 
self.bot.get_all_channels()\n Category = None\n\n for cat in channels:\n if(cat.name == 'Текстовые каналы'):\n Category = cat\n \n channels = self.bot.get_all_channels()\n\n for ch in channels:\n if(ch.name == 'мемы'):\n MemesBot.mem_text_channel = ch.id\n return\n\n\n if(MemesBot.mem_text_channel is None):\n Guilds = self.bot.guilds \n for guild in Guilds: \n channel = await guild.create_text_channel('мемы',category=Category)\n MemesBot.mem_text_channel = channel.id\n return\n\n \n\n @commands.command()\n async def meme(self, ctx):\n \"\"\" Send meme on channel \"\"\" \n if ctx.channel.id == MemesBot.mem_text_channel: \n\n async def get_code():\n r = await self.asession.get(self.url)\n await r.html.arender(sleep=1, keep_page=True)\n return r\n\n response = await get_code() \n noindex = response.html.find('noindex', first=True)\n img = noindex.xpath('//img')[0]\n\n await ctx.send('http:' + img.attrs['src'])\n \n @commands.command()\n async def atkritka(self, ctx):\n \"\"\" Send meme on channel \"\"\" \n if ctx.channel.id == MemesBot.mem_text_channel: \n\n async def get_code():\n r = await self.asession.get(self.atkritka_url)\n await r.html.arender(sleep=1, keep_page=True)\n return r\n\n response = await get_code() \n content = response.html.find('content', first=True)\n detail = content.html.find('detailt', first=True)\n img = detail.xpath('//img')[0]\n\n await ctx.send('http:' + img.attrs['src'])\n\n @commands.command()\n async def joke(self,ctx):\n \"\"\" Send joke on channel \"\"\" \n async with aiohttp.ClientSession() as sess:\n async with sess.get(self.joke_url) as resp:\n\n text = (await resp.text()).replace('\\r\\n', '\\\\r\\\\n')\n\n data = json.loads(text)\n joke = data['content']\n\n await ctx.send( '```' + str(joke) + '```')\n\n", "repo_name": "1e3m/bot_music", "sub_path": "bot_music/bot/memes_bot.py", "file_name": "memes_bot.py", "file_ext": "py", "file_size_in_byte": 2803, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "discord.ext.commands.Cog", "line_number": 12, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 12, "usage_type": "name"}, {"api_name": "requests_html.AsyncHTMLSession", "line_number": 21, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 23, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 23, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 23, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 50, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 50, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 66, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 66, "usage_type": "name"}, {"api_name": "aiohttp.ClientSession", "line_number": 86, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 91, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 83, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 83, "usage_type": "name"}]} +{"seq_id": "37519657922", "text": "import sys\nimport random\nimport settings\nimport utility\nimport sfml as sf\nfrom statistics import Statistics\nfrom entity import Entity\nfrom collision_grid import CollisionGrid\n\n\ntime_per_update = settings.TIME_PER_UPDATE\n\nwidth = settings.WIDTH\nheight = settings.HEIGHT\ncell_size = 
settings.CELL_SIZE\n\n\nclass Application(object):\n\n\tdef __init__(self):\n\n\t\tfont = sf.Font.from_file(\"media/fonts/UbuntuMono-R.ttf\")\n\t\tself.statistics = Statistics(font)\n\n\t\twindow_settings = sf.window.ContextSettings()\n\t\twindow_settings.antialiasing_level = 8\n\t\tself.window = sf.RenderWindow(\n\t\t\tsf.VideoMode(width, height),\n\t\t\t\"Steering Behaviors For Autonomous Characters\",\n\t\t\tsf.Style.DEFAULT, window_settings)\n\t\tself.window.vertical_synchronization = False\n\n\t\tself.entities = []\n\t\tself.grid = CollisionGrid(width, height, cell_size)\n\t\tself.collision = 0\n\n\n\tdef run(self):\n\n\t\tclock = sf.Clock()\n\t\ttime_since_last_update = sf.seconds(0)\n\t\tfor i in range(settings.INITIAL_ENTITIES):\n\t\t\tself.entities.append(Entity(sf.Vector2(\n\t\t\t\trandom.randrange(width),\n\t\t\t\trandom.randrange(height))))\n\n\t\twhile self.window.is_open:\n\n\t\t\tdt = clock.restart()\n\t\t\ttime_since_last_update += dt\n\n\t\t\twhile time_since_last_update > time_per_update:\n\t\t\t\ttime_since_last_update -= time_per_update\n\n\t\t\t\tt = clock.elapsed_time\n\t\t\t\tself.update(time_per_update)\n\t\t\t\tt = clock.elapsed_time - t\n\t\t\t\tself.statistics.t_update.append(t.microseconds)\n\n\t\t\tself.process_events()\n\t\t\tself.statistics.update_texts(dt)\n\n\t\t\tt = clock.elapsed_time\n\t\t\tself.render()\n\t\t\tself.statistics.num_frames += 1\n\t\t\tt = clock.elapsed_time - t\n\t\t\tself.statistics.t_render.append(t.microseconds)\n\n\n\tdef process_events(self):\n\n\t\tdef close():\n\t\t\tself.window.close()\n\n\t\tdef toggle_help():\n\t\t\tself.statistics.help ^= True\n\n\t\tdef delete_entities():\n\t\t\tdel self.entities[:]\n\n\t\tdef toggle_attractive_mouse():\n\t\t\tsettings.attractive_mouse = not settings.attractive_mouse\n\n\t\tdef toggle_scary_mouse():\n\t\t\tsettings.scary_mouse = not settings.scary_mouse\n\n\t\tdef scatter_boids():\n\t\t\tfor e in self.entities:\n\t\t\t\te.scatter()\n\n\t\tdef increase_boid_sight_radius():\n\t\t\tsettings.boid_sight_radius += 10\n\n\t\tdef decrease_boid_sight_radius():\n\t\t\tsettings.boid_sight_radius -= 10\n\n\t\tdef increase_desired_separation():\n\t\t\tsettings.desired_separation += 10\n\n\t\tdef decrease_desired_separation():\n\t\t\tsettings.desired_separation -= 10\n\n\t\tdef increase_max_steering_force():\n\t\t\tsettings.max_steering_force += 10\n\n\t\tdef decrease_max_steering_force():\n\t\t\tsettings.max_steering_force -= 10\n\n\t\tdef increase_separation_factor():\n\t\t\tsettings.separation += 0.1\n\n\t\tdef decrease_separation_factor():\n\t\t\tsettings.separation -= 0.1\n\n\t\tdef increase_alignment_factor():\n\t\t\tsettings.alignment += 0.01\n\n\t\tdef decrease_alignment_factor():\n\t\t\tsettings.alignment -= 0.01\n\n\t\tdef increase_cohesion_factor():\n\t\t\tsettings.cohesion += 0.01\n\n\t\tdef decrease_cohesion_factor():\n\t\t\tsettings.cohesion -= 0.01\n\n\t\tactions = {\n\t\t\tsf.Keyboard.ESCAPE : close,\n\t\t\tsettings.toggle_help : toggle_help,\n\t\t\tsettings.delete_entities : delete_entities,\n\t\t\tsettings.toggle_attractive_mouse : toggle_attractive_mouse,\n\t\t\tsettings.toggle_scary_mouse : toggle_scary_mouse,\n\t\t\tsettings.scatter_boids : scatter_boids,\n\t\t\tsettings.increase_boid_sight_radius : increase_boid_sight_radius,\n\t\t\tsettings.decrease_boid_sight_radius : decrease_boid_sight_radius,\n\t\t\tsettings.increase_desired_separation : increase_desired_separation,\n\t\t\tsettings.decrease_desired_separation : 
decrease_desired_separation,\n\t\t\tsettings.increase_max_steering_force : increase_max_steering_force,\n\t\t\tsettings.decrease_max_steering_force : decrease_max_steering_force,\n\t\t\tsettings.increase_separation_factor : increase_separation_factor,\n\t\t\tsettings.decrease_separation_factor : decrease_separation_factor,\n\t\t\tsettings.increase_alignment_factor : increase_alignment_factor,\n\t\t\tsettings.decrease_alignment_factor : decrease_alignment_factor,\n\t\t\tsettings.increase_cohesion_factor : increase_cohesion_factor,\n\t\t\tsettings.decrease_cohesion_factor : decrease_cohesion_factor\n\t\t}\n\n\n\t\tfor event in self.window.events:\n\n\t\t\tif (type(event) is sf.CloseEvent):\n\t\t\t\tclose()\n\n\t\t\telif (type(event) is sf.MouseButtonEvent and event.pressed):\n\t\t\t\tself.entities.append(Entity(event.position))\n\n\t\t\telif (type(event) is sf.KeyEvent and event.pressed):\n\t\t\t\ttry:\n\t\t\t\t\tactions.get(event.code)()\n\t\t\t\texcept TypeError:\n\t\t\t\t\tpass\n\n\n\tdef update(self, dt):\n\n\t\tif self.collision == 5:\n\t\t\tself.update_grid()\n\t\t\tself.handle_collision()\n\t\t\tself.grid.clear()\n\t\t\tself.collision = 0\n\t\tself.collision += 1\n\n\t\tfor e in self.entities:\n\t\t\te.update(dt)\n\t\t\tif (e.position.x < 0):\n\t\t\t\te.position.x += width\n\t\t\telif (e.position.x > width):\n\t\t\t\te.position.x -= width\n\t\t\tif (e.position.y < 0):\n\t\t\t\te.position.y += height\n\t\t\telif (e.position.y > height):\n\t\t\t\te.position.y -= height\n\n\n\tdef render(self):\n\n\t\tself.window.clear()\n\t\tfor e in self.entities:\n\t\t\tself.window.draw(e)\n\t\tself.window.draw(self.grid)\n\t\tself.window.draw(self.statistics)\n\t\tself.window.display()\n\n\n\tdef update_grid(self):\n\n\t\tfor e in self.entities:\n\t\t\tself.grid.add_entity(e)\n\n\n\tdef handle_collision(self):\n\n\t\tself.statistics.num_entities = len(self.entities)\n\t\tself.statistics.collision_checks = 0\n\n\t\tfor f in self.entities:\n\n\t\t\tself.grid.remove_entity(f)\n\t\t\tnearby_boids = self.grid.get_nearby_entities(f,\n\t\t\t\tsettings.boid_sight_radius)\n\n\t\t\t# SCARY MOUSE\n\t\t\tif (settings.scary_mouse):\n\t\t\t\td = sf.Mouse.get_position(self.window) - f.position\n\t\t\t\tif (not d.x and not d.y):\n\t\t\t\t\td.x = 1\n\t\t\t\tdistance = utility.length(d)\n\t\t\t\tif (distance < settings.ENTITY_SIZE * 7):\n\t\t\t\t\tsteer = d * (settings.ENTITY_SIZE * 7/distance - 1)\n\t\t\t\t\tif (utility.length(steer) > settings.max_steering_force):\n\t\t\t\t\t\tsteer = utility.unit_vector(steer) * settings.max_steering_force\n\t\t\t\t\tf.velocity -= steer\n\n\t\t\t# ATTRACTIVE MOUSE\n\t\t\tif (settings.attractive_mouse):\n\t\t\t\tf.centre_of_mass += sf.Mouse.get_position(self.window)\n\t\t\t\tf.num_nearby_entities += 1\n\n\t\t\t# NEARBY BOIDS\n\t\t\tfor s in nearby_boids:\n\t\t\t\tself.statistics.collision_checks += 1\n\n\t\t\t\td = s.position - f.position\n\t\t\t\tif (not d.x and not d.y):\n\t\t\t\t\td.x = 1\n\t\t\t\tdistance = utility.length(d)\n\n\t\t\t\tif (distance < settings.boid_sight_radius):\n\t\t\t\t\tf.centre_of_mass += s.position\n\t\t\t\t\ts.centre_of_mass += f.position\n\t\t\t\t\tf.average_velocity += s.velocity\n\t\t\t\t\ts.average_velocity += f.velocity\n\t\t\t\t\tf.num_nearby_entities += 1\n\t\t\t\t\ts.num_nearby_entities += 1\n\n\t\t\t\t\t# SEPARATE\n\t\t\t\t\tif (distance < settings.desired_separation):\n\t\t\t\t\t\tsteer = d * (settings.desired_separation/distance - 1) * settings.separation\n\t\t\t\t\t\tif (utility.length(steer) > settings.max_steering_force):\n\t\t\t\t\t\t\tsteer 
= utility.unit_vector(steer) * settings.max_steering_force\n\t\t\t\t\t\tf.velocity -= steer\n\t\t\t\t\t\ts.velocity += steer\n\n\t\t\tif (not f.num_nearby_entities):\n\t\t\t\tcontinue\n\n\t\t\t# COHERE\n\t\t\tdesired = f.centre_of_mass / f.num_nearby_entities - f.position\n\t\t\td = utility.length(desired)\n\t\t\tif (d < 65):\n\t\t\t\tdesired *= d/15\n\t\t\telse:\n\t\t\t\tdesired *= 65/15\n\t\t\tsteer = desired-f.velocity\n\t\t\tif (utility.length(steer) > settings.max_steering_force):\n\t\t\t\tsteer = utility.unit_vector(steer) * settings.max_steering_force\n\t\t\tf.velocity += steer * settings.cohesion\n\n\t\t\t# ALIGN\n\t\t\tsteer = f.average_velocity / f.num_nearby_entities\n\t\t\tif (utility.length(steer) > settings.max_steering_force):\n\t\t\t\tsteer = utility.unit_vector(steer) * settings.max_steering_force\n\t\t\tf.velocity += steer * settings.alignment\n\n\nif __name__ == \"__main__\":\n\n\tapp = Application()\n\tapp.run()\n", "repo_name": "warbaque/python-steering-behaviors", "sub_path": "application.py", "file_name": "application.py", "file_ext": "py", "file_size_in_byte": 7396, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "33", "api": [{"api_name": "settings.TIME_PER_UPDATE", "line_number": 11, "usage_type": "attribute"}, {"api_name": "settings.WIDTH", "line_number": 13, "usage_type": "attribute"}, {"api_name": "settings.HEIGHT", "line_number": 14, "usage_type": "attribute"}, {"api_name": "settings.CELL_SIZE", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sfml.Font.from_file", "line_number": 22, "usage_type": "call"}, {"api_name": "sfml.Font", "line_number": 22, "usage_type": "attribute"}, {"api_name": "statistics.Statistics", "line_number": 23, "usage_type": "call"}, {"api_name": "sfml.window.ContextSettings", "line_number": 25, "usage_type": "call"}, {"api_name": "sfml.window", "line_number": 25, "usage_type": "attribute"}, {"api_name": "sfml.RenderWindow", "line_number": 27, "usage_type": "call"}, {"api_name": "sfml.VideoMode", "line_number": 28, "usage_type": "call"}, {"api_name": "sfml.Style", "line_number": 30, "usage_type": "attribute"}, {"api_name": "collision_grid.CollisionGrid", "line_number": 34, "usage_type": "call"}, {"api_name": "sfml.Clock", "line_number": 40, "usage_type": "call"}, {"api_name": "sfml.seconds", "line_number": 41, "usage_type": "call"}, {"api_name": "settings.INITIAL_ENTITIES", "line_number": 42, "usage_type": "attribute"}, {"api_name": "entity.Entity", "line_number": 43, "usage_type": "call"}, {"api_name": "sfml.Vector2", "line_number": 43, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 44, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 45, "usage_type": "call"}, {"api_name": "settings.attractive_mouse", "line_number": 82, "usage_type": "attribute"}, {"api_name": "settings.scary_mouse", "line_number": 85, "usage_type": "attribute"}, {"api_name": "settings.boid_sight_radius", "line_number": 92, "usage_type": "attribute"}, {"api_name": "settings.boid_sight_radius", "line_number": 95, "usage_type": "attribute"}, {"api_name": "settings.desired_separation", "line_number": 98, "usage_type": "attribute"}, {"api_name": "settings.desired_separation", "line_number": 101, "usage_type": "attribute"}, {"api_name": "settings.max_steering_force", "line_number": 104, "usage_type": "attribute"}, {"api_name": "settings.max_steering_force", "line_number": 107, "usage_type": "attribute"}, {"api_name": "settings.separation", "line_number": 110, 
"usage_type": "attribute"}, {"api_name": "settings.separation", "line_number": 113, "usage_type": "attribute"}, {"api_name": "settings.alignment", "line_number": 116, "usage_type": "attribute"}, {"api_name": "settings.alignment", "line_number": 119, "usage_type": "attribute"}, {"api_name": "settings.cohesion", "line_number": 122, "usage_type": "attribute"}, {"api_name": "settings.cohesion", "line_number": 125, "usage_type": "attribute"}, {"api_name": "sfml.Keyboard", "line_number": 128, "usage_type": "attribute"}, {"api_name": "settings.toggle_help", "line_number": 129, "usage_type": "attribute"}, {"api_name": "settings.delete_entities", "line_number": 130, "usage_type": "attribute"}, {"api_name": "settings.toggle_attractive_mouse", "line_number": 131, "usage_type": "attribute"}, {"api_name": "settings.toggle_scary_mouse", "line_number": 132, "usage_type": "attribute"}, {"api_name": "settings.scatter_boids", "line_number": 133, "usage_type": "attribute"}, {"api_name": "settings.increase_boid_sight_radius", "line_number": 134, "usage_type": "attribute"}, {"api_name": "settings.decrease_boid_sight_radius", "line_number": 135, "usage_type": "attribute"}, {"api_name": "settings.increase_desired_separation", "line_number": 136, "usage_type": "attribute"}, {"api_name": "settings.decrease_desired_separation", "line_number": 137, "usage_type": "attribute"}, {"api_name": "settings.increase_max_steering_force", "line_number": 138, "usage_type": "attribute"}, {"api_name": "settings.decrease_max_steering_force", "line_number": 139, "usage_type": "attribute"}, {"api_name": "settings.increase_separation_factor", "line_number": 140, "usage_type": "attribute"}, {"api_name": "settings.decrease_separation_factor", "line_number": 141, "usage_type": "attribute"}, {"api_name": "settings.increase_alignment_factor", "line_number": 142, "usage_type": "attribute"}, {"api_name": "settings.decrease_alignment_factor", "line_number": 143, "usage_type": "attribute"}, {"api_name": "settings.increase_cohesion_factor", "line_number": 144, "usage_type": "attribute"}, {"api_name": "settings.decrease_cohesion_factor", "line_number": 145, "usage_type": "attribute"}, {"api_name": "sfml.CloseEvent", "line_number": 151, "usage_type": "attribute"}, {"api_name": "sfml.MouseButtonEvent", "line_number": 154, "usage_type": "attribute"}, {"api_name": "entity.Entity", "line_number": 155, "usage_type": "call"}, {"api_name": "sfml.KeyEvent", "line_number": 157, "usage_type": "attribute"}, {"api_name": "settings.boid_sight_radius", "line_number": 210, "usage_type": "attribute"}, {"api_name": "settings.scary_mouse", "line_number": 213, "usage_type": "attribute"}, {"api_name": "sfml.Mouse.get_position", "line_number": 214, "usage_type": "call"}, {"api_name": "sfml.Mouse", "line_number": 214, "usage_type": "attribute"}, {"api_name": "utility.length", "line_number": 217, "usage_type": "call"}, {"api_name": "settings.ENTITY_SIZE", "line_number": 218, "usage_type": "attribute"}, {"api_name": "settings.ENTITY_SIZE", "line_number": 219, "usage_type": "attribute"}, {"api_name": "utility.length", "line_number": 220, "usage_type": "call"}, {"api_name": "settings.max_steering_force", "line_number": 220, "usage_type": "attribute"}, {"api_name": "utility.unit_vector", "line_number": 221, "usage_type": "call"}, {"api_name": "settings.max_steering_force", "line_number": 221, "usage_type": "attribute"}, {"api_name": "settings.attractive_mouse", "line_number": 225, "usage_type": "attribute"}, {"api_name": "sfml.Mouse.get_position", "line_number": 226, 
"usage_type": "call"}, {"api_name": "sfml.Mouse", "line_number": 226, "usage_type": "attribute"}, {"api_name": "utility.length", "line_number": 236, "usage_type": "call"}, {"api_name": "settings.boid_sight_radius", "line_number": 238, "usage_type": "attribute"}, {"api_name": "settings.desired_separation", "line_number": 247, "usage_type": "attribute"}, {"api_name": "settings.desired_separation", "line_number": 248, "usage_type": "attribute"}, {"api_name": "settings.separation", "line_number": 248, "usage_type": "attribute"}, {"api_name": "utility.length", "line_number": 249, "usage_type": "call"}, {"api_name": "settings.max_steering_force", "line_number": 249, "usage_type": "attribute"}, {"api_name": "utility.unit_vector", "line_number": 250, "usage_type": "call"}, {"api_name": "settings.max_steering_force", "line_number": 250, "usage_type": "attribute"}, {"api_name": "utility.length", "line_number": 259, "usage_type": "call"}, {"api_name": "utility.length", "line_number": 265, "usage_type": "call"}, {"api_name": "settings.max_steering_force", "line_number": 265, "usage_type": "attribute"}, {"api_name": "utility.unit_vector", "line_number": 266, "usage_type": "call"}, {"api_name": "settings.max_steering_force", "line_number": 266, "usage_type": "attribute"}, {"api_name": "settings.cohesion", "line_number": 267, "usage_type": "attribute"}, {"api_name": "utility.length", "line_number": 271, "usage_type": "call"}, {"api_name": "settings.max_steering_force", "line_number": 271, "usage_type": "attribute"}, {"api_name": "utility.unit_vector", "line_number": 272, "usage_type": "call"}, {"api_name": "settings.max_steering_force", "line_number": 272, "usage_type": "attribute"}, {"api_name": "settings.alignment", "line_number": 273, "usage_type": "attribute"}]} +{"seq_id": "6640868427", "text": "\nfrom .models import Contact\nfrom .serializers import ContactSerializer\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\n\n@api_view(['POST'])\ndef allContacts(request):\n contacts = Contact.objects.filter(mailUser=request.data.get('mailUser'))\n serializer = ContactSerializer(contacts, many=True)\n return Response(serializer.data)\n\n\n\n@api_view(['POST'])\ndef addContact(request):\n serializer = ContactSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['PUT'])\ndef updateContact(request):\n contact = Contact.objects.get(id=(request.data.get('id')))\n serializer = ContactSerializer(contact, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['DELETE'])\ndef deleteContact(request):\n try:\n contact = Contact.objects.get(id=(request.data.get('id')))\n except Contact.DoesNotExist:\n return Response({'message': 'ERROR'})\n contact.delete()\n return Response({'message': 'deleted successfuly'})\n", "repo_name": "Tomerre1/My-ALL-Backend-WebApplication", "sub_path": "contact/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1321, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "models.Contact.objects.filter", "line_number": 11, "usage_type": "call"}, {"api_name": "models.Contact.objects", "line_number": 11, "usage_type": "attribute"}, 
{"api_name": "models.Contact", "line_number": 11, "usage_type": "name"}, {"api_name": "serializers.ContactSerializer", "line_number": 12, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 13, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 9, "usage_type": "call"}, {"api_name": "serializers.ContactSerializer", "line_number": 19, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 22, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 23, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 23, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 23, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 17, "usage_type": "call"}, {"api_name": "models.Contact.objects.get", "line_number": 28, "usage_type": "call"}, {"api_name": "models.Contact.objects", "line_number": 28, "usage_type": "attribute"}, {"api_name": "models.Contact", "line_number": 28, "usage_type": "name"}, {"api_name": "serializers.ContactSerializer", "line_number": 29, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 32, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 33, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 33, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 33, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 26, "usage_type": "call"}, {"api_name": "models.Contact.objects.get", "line_number": 39, "usage_type": "call"}, {"api_name": "models.Contact.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "models.Contact", "line_number": 39, "usage_type": "name"}, {"api_name": "models.Contact.DoesNotExist", "line_number": 40, "usage_type": "attribute"}, {"api_name": "models.Contact", "line_number": 40, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 41, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 43, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "2861668688", "text": "import torch.nn\n\nclass ImageSRModel(torch.nn.Module):\n def __init__(self, FACTOR):\n super().__init__()\n self._L1 = torch.nn.Conv2d(1,64, kernel_size=(2,2), dtype=torch.float32) #L1\n self._L2 = torch.nn.Conv2d(64,64, kernel_size=(1,1), dtype=torch.float32) #L2\n self._L3 = torch.nn.Conv2d(64,64, kernel_size=(1,1), dtype=torch.float32) #L3\n self._L4 = torch.nn.Conv2d(64,64, kernel_size=(1,1), dtype=torch.float32) #L4\n self._L5 = torch.nn.Conv2d(64,64, kernel_size=(1,1), dtype=torch.float32) #L5\n self._L6 = torch.nn.Conv2d(64,FACTOR*FACTOR, kernel_size=(1,1), dtype=torch.float32) #L6\n self._DTS = torch.nn.PixelShuffle(FACTOR) #Depth-To-Space\n\n def forward(self, input_batch):\n rot_ens = []\n for i in range(4):\n net_in = torch.rot90(input_batch,i,[2,3])\n \n net_in = torch.nn.functional.relu(self._L1(net_in))\n net_in = torch.nn.functional.relu(self._L2(net_in))\n net_in = torch.nn.functional.relu(self._L3(net_in))\n net_in = torch.nn.functional.relu(self._L4(net_in))\n net_in = torch.nn.functional.relu(self._L5(net_in))\n net_in = self._L6(net_in)\n net_in = self._DTS(net_in)\n\n net_out = torch.rot90(net_in,-i,[2,3])\n 
rot_ens.append(net_out)\n #print(\"ROTATION: \" + str(i*90))\n final_pred = rot_ens[0] + rot_ens[1] + rot_ens[2] + rot_ens[3]\n final_pred = 0.25 * final_pred\n return final_pred\n\n\n\n ", "repo_name": "RahimD/ImageSR", "sub_path": "ImageSRModel.py", "file_name": "ImageSRModel.py", "file_ext": "py", "file_size_in_byte": 1633, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "torch.nn.nn", "line_number": 3, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 3, "usage_type": "name"}, {"api_name": "torch.nn.nn.Conv2d", "line_number": 6, "usage_type": "call"}, {"api_name": "torch.nn.nn", "line_number": 6, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 6, "usage_type": "name"}, {"api_name": "torch.nn.float32", "line_number": 6, "usage_type": "attribute"}, {"api_name": "torch.nn.nn.Conv2d", "line_number": 7, "usage_type": "call"}, {"api_name": "torch.nn.nn", "line_number": 7, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 7, "usage_type": "name"}, {"api_name": "torch.nn.float32", "line_number": 7, "usage_type": "attribute"}, {"api_name": "torch.nn.nn.Conv2d", "line_number": 8, "usage_type": "call"}, {"api_name": "torch.nn.nn", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 8, "usage_type": "name"}, {"api_name": "torch.nn.float32", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.nn.nn.Conv2d", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.nn.nn", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 9, "usage_type": "name"}, {"api_name": "torch.nn.float32", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch.nn.nn.Conv2d", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.nn.nn", "line_number": 10, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 10, "usage_type": "name"}, {"api_name": "torch.nn.float32", "line_number": 10, "usage_type": "attribute"}, {"api_name": "torch.nn.nn.Conv2d", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.nn.nn", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.nn.float32", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.nn.nn.PixelShuffle", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.nn.nn", "line_number": 12, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.nn.rot90", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.nn.functional.relu", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn.nn", "line_number": 19, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.nn.functional.relu", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn.nn", "line_number": 20, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.nn.functional.relu", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn.nn", "line_number": 21, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.nn.functional.relu", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn.nn", "line_number": 22, 
"usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.nn.nn.functional.relu", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn.nn", "line_number": 23, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.rot90", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "4650696234", "text": "import logging\nimport os\nimport sys\nimport time\nfrom http import HTTPStatus\n\nimport requests\nimport telegram\nfrom dotenv import load_dotenv\n\nfrom exceptions import MissingDataException, StatusCodeException\nfrom settings import ENDPOINT, HOMEWORK_STATUSES, RETRY_TIME\n\nload_dotenv()\n\nlogging.basicConfig(\n level=logging.DEBUG,\n filename='main.log',\n format='%(asctime)s, %(levelname)s, %(name)s, %(message)s'\n)\n\nPRACTICUM_TOKEN = os.getenv('TOKEN_PRACTICUM')\nTELEGRAM_TOKEN = os.getenv('TOKEN_TELEGRAM')\nTELEGRAM_CHAT_ID = os.getenv('CHAT_ID_TELEGRAM')\nHEADERS = {'Authorization': f'OAuth {PRACTICUM_TOKEN}'}\n\n\ndef send_message(bot, message):\n \"\"\"Отправляем сообщение в чат.\"\"\"\n try:\n bot.send_message(TELEGRAM_CHAT_ID, message)\n logging.info('Сообщение успешно отправлено')\n except telegram.TelegramError as error:\n logging.error(f'Ошибка отправки сообщения: {error}')\n\n\ndef get_api_answer(current_timestamp):\n \"\"\"Проверяем ответ api.\"\"\"\n timestamp = current_timestamp or int(time.time())\n params = {'from_date': timestamp}\n try:\n response = requests.get(ENDPOINT, headers=HEADERS, params=params)\n except Exception as error:\n logging.error(f'Ошибка при запросе к основному API: {error}')\n if response.status_code != HTTPStatus.OK:\n raise StatusCodeException('Ошибка при запросе к основному API')\n try:\n return response.json()\n except Exception as error:\n logging.error(f'Ошибка преобразования в json: {error}')\n\n\ndef check_response(response):\n \"\"\"Проверяем данные в ответе.\"\"\"\n if type(response) != dict:\n error = 'Тип ответа не словарь'\n raise TypeError(error)\n if 'homeworks' not in response:\n error = f'Отсутствуют данные в:{response}'\n raise MissingDataException(error)\n homework = response['homeworks']\n if type(homework) != list:\n error = 'Тип ответа не список'\n raise TypeError(error)\n logging.info('Статус домашнего задания обновлен')\n return homework[0]\n\n\ndef parse_status(homework):\n \"\"\"Запрашиваем статус работы.\"\"\"\n if 'homework_name' not in homework:\n raise KeyError('Нет ключа \"homework_name\" в ответе API')\n homework_name = homework['homework_name']\n if 'status' not in homework:\n raise KeyError('Нет ключа \"homework_status\" в ответе API')\n homework_status = homework['status']\n if homework_status not in HOMEWORK_STATUSES:\n raise KeyError('Нет ключа \"homework_status\" в словаре статусов')\n verdict = HOMEWORK_STATUSES.get(homework_status)\n return f'Изменился статус проверки работы \"{homework_name}\". 
{verdict}'\n\n\ndef check_tokens():\n    \"\"\"Проверяем переменные окружения.\"\"\"\n    token_list = [PRACTICUM_TOKEN, TELEGRAM_TOKEN, TELEGRAM_CHAT_ID]\n    check_tokens = all(token_list)\n    return check_tokens\n\n\ndef main():\n    \"\"\"Основная логика работы бота.\"\"\"\n    if not check_tokens():\n        error = 'Отсутствуют переменные окружения'\n        logging.critical(error, exc_info=True)\n        sys.exit()\n    bot = telegram.Bot(token=TELEGRAM_TOKEN)\n    current_timestamp = int(time.time())\n    status = ''\n    while True:\n        try:\n            response = get_api_answer(current_timestamp)\n        except Exception as error:\n            message = f'Сбой в работе программы: {error}'\n            time.sleep(RETRY_TIME)\n            continue\n        try:\n            if check_response(response):\n                homework = check_response(response)\n                message = parse_status(homework)\n                if message != status:\n                    send_message(bot, message)\n                    status = message\n            current_timestamp = current_timestamp\n\n        except Exception as error:\n            message = f'Сбой в работе программы: {error}'\n            if message != status:\n                send_message(bot, message)\n                status = message\n            logging.error(error, exc_info=True)\n        finally:\n            time.sleep(RETRY_TIME)\n\n\nif __name__ == '__main__':\n    main()\n", "repo_name": "aoamosova/homework_bot", "sub_path": "homework.py", "file_name": "homework.py", "file_ext": "py", "file_size_in_byte": 4519, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 14, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 16, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 22, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 23, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 32, "usage_type": "call"}, {"api_name": "telegram.TelegramError", "line_number": 33, "usage_type": "attribute"}, {"api_name": "logging.error", "line_number": 34, "usage_type": "call"}, {"api_name": "time.time", "line_number": 39, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 42, "usage_type": "call"}, {"api_name": "settings.ENDPOINT", "line_number": 42, "usage_type": "argument"}, {"api_name": "logging.error", "line_number": 44, "usage_type": "call"}, {"api_name": "http.HTTPStatus.OK", "line_number": 45, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 45, "usage_type": "name"}, {"api_name": "exceptions.StatusCodeException", "line_number": 46, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 50, "usage_type": "call"}, {"api_name": "exceptions.MissingDataException", "line_number": 60, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 65, "usage_type": "call"}, {"api_name": "settings.HOMEWORK_STATUSES", "line_number": 77, "usage_type": "name"}, {"api_name": "settings.HOMEWORK_STATUSES.get", "line_number": 79, "usage_type": "call"}, {"api_name": "settings.HOMEWORK_STATUSES", "line_number": 79, "usage_type": "name"}, {"api_name": "logging.critical", "line_number": 94, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 95, "usage_type": "call"}, {"api_name": "telegram.Bot", "line_number": 96, "usage_type": "call"}, {"api_name": "time.time", "line_number": 97, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 104, "usage_type": "call"}, {"api_name": "settings.RETRY_TIME", "line_number": 104, "usage_type": "argument"}, 
{"api_name": "logging.error", "line_number": 120, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 122, "usage_type": "call"}, {"api_name": "settings.RETRY_TIME", "line_number": 122, "usage_type": "argument"}]} +{"seq_id": "29630472097", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom setuptools import setup\n\ninstall_requires = open(\"requirements.txt\").read().split('\\n')\nreadme_content = open(\"README.rst\").read()\n\ndef gen_data_files(package_dir, subdir):\n import os.path\n results = []\n for root, dirs, files in os.walk(os.path.join(package_dir, subdir)):\n results.extend([os.path.join(root, f)[len(package_dir)+1:] for f in files])\n return results\n\nino_package_data = gen_data_files('ino', 'make') + gen_data_files('ino', 'templates')\n\nsetup(\n name='ino',\n version='0.3.7',\n description='Command line toolkit for working with Arduino hardware',\n long_description=readme_content,\n author='Victor Nakoryakov, Amperka Team',\n author_email='victor@amperka.ru',\n license='MIT',\n keywords=\"arduino build system\",\n url='http://inotool.org',\n packages=['ino', 'ino.commands'],\n scripts=['bin/ino'],\n package_data={'ino': ino_package_data},\n install_requires=install_requires,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved :: MIT License\",\n \"Intended Audience :: Developers\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development :: Embedded Systems\",\n ],\n)\n", "repo_name": "amperka/ino", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1307, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1085, "dataset": "github-code", "pt": "41", "api": [{"api_name": "os.path.walk", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 13, "usage_type": "name"}, {"api_name": "setuptools.setup", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "38960274152", "text": "from transformers import pipeline\r\nsummarizer = pipeline(\"summarization\", model = \"pablo-chocobar/summarizer\")\r\nimport sys \r\ndef summarize(arg1):\r\n x = arg1\r\n \r\n def clean(Article_text):\r\n lines_list = []\r\n\r\n for line in Article_text.split(\"\\n\"):\r\n\r\n stripped_line = line.strip()\r\n \r\n if len(stripped_line) >= 100:\r\n lines_list.append(stripped_line)\r\n return \" \".join(lines_list)\r\n \r\n x = clean(x)\r\n article_text = \"\"\" \"\"\"\r\n for i in x:\r\n article_text += i\r\n\r\n import re\r\n article_text = re.sub(r'\\[[0-9]*\\]', ' ', article_text)\r\n article_text = re.sub(r'\\s+', ' ', article_text)\r\n\r\n article_text1 = article_text[:1000]\r\n summary = summarizer(article_text1, max_length=200, min_length=100, do_sample=False)\r\n\r\n b = summary[0][\"summary_text\"]\r\n return b\r\n\r\nif __name__ == \"__main__\":\r\n summarize(sys.argv[1])\r\n", "repo_name": "pablo-chocobar/energyfeed-summarizer", "sub_path": "summarizer.py", "file_name": "summarizer.py", "file_ext": "py", "file_size_in_byte": 933, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": 
"33", "api": [{"api_name": "transformers.pipeline", "line_number": 2, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 24, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 25, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 34, "usage_type": "attribute"}]} +{"seq_id": "7448670227", "text": "from django import forms\nfrom django_countries.fields import CountryField\nfrom django_countries.widgets import CountrySelectWidget\n\nfrom core.models import Estimate\n\nPAYMENT_CHOICES = (\n ('S', 'Stripe'),\n ('P', 'PayPal')\n)\n\n\nclass CheckoutForm(forms.Form):\n shipping_address = forms.CharField(required=False)\n shipping_address2 = forms.CharField(required=False)\n shipping_country = CountryField(blank_label='(select country)').formfield(\n required=False,\n widget=CountrySelectWidget(attrs={\n 'class': 'custom-select d-block w-100',\n }))\n shipping_zip = forms.CharField(required=False)\n\n billing_address = forms.CharField(required=False)\n billing_address2 = forms.CharField(required=False)\n billing_country = CountryField(blank_label='(select country)').formfield(\n required=False,\n widget=CountrySelectWidget(attrs={\n 'class': 'custom-select d-block w-100',\n }))\n billing_zip = forms.CharField(required=False)\n\n same_billing_address = forms.BooleanField(required=False)\n set_default_shipping = forms.BooleanField(required=False)\n use_default_shipping = forms.BooleanField(required=False)\n set_default_billing = forms.BooleanField(required=False)\n use_default_billing = forms.BooleanField(required=False)\n\n payment_option = forms.ChoiceField(\n widget=forms.RadioSelect, choices=PAYMENT_CHOICES)\n\n\nclass EstimateForm(forms.ModelForm):\n class Meta:\n model = Estimate\n exclude = ['requester', 'shipping_address',\n 'item',\n 'quantitys',\n 'quantitym',\n 'quantityl', ]\n labels = {\n 'shipping_address': 'Destination Address',\n 'item': \"Item\",\n 'quantitys': 'How Many Smalls?',\n 'quantitym': 'How Many Mediums?',\n 'quantityl': 'How Many Larges?',\n 'notes': \"Inquiry\"\n\n }\n # help_texts={\n # 'quantitys': \"Default is 0\",\n # 'quantitym': 'Default is 0',\n # 'quantityl': \"Default is 0\"\n # }\n widgets = {\n 'notes': forms.Textarea(attrs={'rows': 4, 'cols': 100})\n }\n\n\nclass CouponForm(forms.Form):\n code = forms.CharField(widget=forms.TextInput(attrs={\n 'class': 'form-control',\n 'placeholder': 'Promo code',\n 'aria-label': 'Recipient\\'s username',\n 'aria-describedby': 'basic-addon2'\n }))\n\n\nclass RefundForm(forms.Form):\n ref_code = forms.CharField()\n message = forms.CharField(widget=forms.Textarea(attrs={\n 'rows': 4\n }))\n email = forms.EmailField()\n\n\nclass PaymentForm(forms.Form):\n stripeToken = forms.CharField(required=False)\n save = forms.BooleanField(required=False)\n use_default = forms.BooleanField(required=False)\n", "repo_name": "hussainjhaveri/hulirip", "sub_path": "core/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 2833, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "django.forms.Form", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 13, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 14, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 15, "usage_type": "name"}, 
{"api_name": "django_countries.fields.CountryField", "line_number": 16, "usage_type": "call"}, {"api_name": "django_countries.widgets.CountrySelectWidget", "line_number": 18, "usage_type": "call"}, {"api_name": "django.forms.CharField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 21, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 23, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 24, "usage_type": "name"}, {"api_name": "django_countries.fields.CountryField", "line_number": 25, "usage_type": "call"}, {"api_name": "django_countries.widgets.CountrySelectWidget", "line_number": 27, "usage_type": "call"}, {"api_name": "django.forms.CharField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 30, "usage_type": "name"}, {"api_name": "django.forms.BooleanField", "line_number": 32, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 32, "usage_type": "name"}, {"api_name": "django.forms.BooleanField", "line_number": 33, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 33, "usage_type": "name"}, {"api_name": "django.forms.BooleanField", "line_number": 34, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 34, "usage_type": "name"}, {"api_name": "django.forms.BooleanField", "line_number": 35, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 35, "usage_type": "name"}, {"api_name": "django.forms.BooleanField", "line_number": 36, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 36, "usage_type": "name"}, {"api_name": "django.forms.ChoiceField", "line_number": 38, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 38, "usage_type": "name"}, {"api_name": "django.forms.RadioSelect", "line_number": 39, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 39, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 42, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 42, "usage_type": "name"}, {"api_name": "core.models.Estimate", "line_number": 44, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 65, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 65, "usage_type": "name"}, {"api_name": "django.forms.Form", "line_number": 69, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 69, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 70, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 70, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 70, "usage_type": "call"}, {"api_name": "django.forms.Form", "line_number": 78, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 78, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 79, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 79, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 80, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 80, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 80, "usage_type": "call"}, {"api_name": "django.forms.EmailField", "line_number": 83, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 83, "usage_type": "name"}, 
{"api_name": "django.forms.Form", "line_number": 86, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 86, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 87, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 87, "usage_type": "name"}, {"api_name": "django.forms.BooleanField", "line_number": 88, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 88, "usage_type": "name"}, {"api_name": "django.forms.BooleanField", "line_number": 89, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 89, "usage_type": "name"}]} +{"seq_id": "25411443814", "text": "from flask import Blueprint, jsonify, request\nimport gestion_salutations\nfrom flask_cors import CORS # Ensure you import this if you're using CORS here\n\nsalutations_api = Blueprint('salutations_api', __name__)\nCORS(salutations_api, resources={r\"/*\": {\"origins\": \"*\"}}, methods=['GET', 'POST', 'PUT', 'DELETE'])\n\n\n@salutations_api.route('/', methods=['POST'])\ndef add_salutation():\n salutation = request.json.get('salutation')\n success = gestion_salutations.ajouter_salutation(salutation) # Get the result of the insertion attempt\n \n if success:\n return jsonify(message=f\"Salutation '{salutation}' ajoutée avec succès.\")\n else:\n return jsonify(message=f\"Salutation '{salutation}' already exists.\"), 409 # 409 is the HTTP status code for \"Conflict\"\n\n@salutations_api.route('/', methods=['GET'])\ndef get_specific_salutation(salutation_id):\n salutation = gestion_salutations.visualiser_salutation(salutation_id)\n if salutation:\n return jsonify(salutation=salutation)\n else:\n return jsonify(message=\"Salutation not found\"), 404\n\n@salutations_api.route('/', methods=['GET'])\ndef get_salutations():\n salutations = gestion_salutations.afficher_salutations()\n print(\"Salutations from the endpoint:\", salutations) # Debugging line\n return jsonify(salutations=salutations)\n\n@salutations_api.route('/', methods=['PUT'])\ndef update_salutation(salutation_id):\n nouvelle_salutation = request.json.get('salutation')\n if not nouvelle_salutation:\n return jsonify(message=\"La nouvelle salutation n'est pas fournie.\"), 400\n\n try:\n gestion_salutations.modifier_salutation(salutation_id, nouvelle_salutation)\n return jsonify(message=\"Salutation modifiée avec succès.\")\n except Exception as e:\n return jsonify(message=f\"Erreur lors de la mise à jour: {str(e)}\"), 500\n\n\n@salutations_api.route('/', methods=['DELETE'])\ndef delete_salutation(salutation_id):\n try:\n gestion_salutations.supprimer_salutation(salutation_id)\n return jsonify(message=\"Salutation supprimée avec succès.\")\n except Exception as e:\n return jsonify(message=f\"Erreur lors de la suppression: {str(e)}\"), 500\n\n\n", "repo_name": "CasparRitchie/CasparRitchie.github.io", "sub_path": "idrv3_back/routes/salutations_routes.py", "file_name": "salutations_routes.py", "file_ext": "py", "file_size_in_byte": 2251, "program_lang": "python", "lang": "fr", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "flask.Blueprint", "line_number": 5, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 6, "usage_type": "call"}, {"api_name": "flask.request.json.get", "line_number": 11, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 11, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 11, "usage_type": "name"}, {"api_name": "gestion_salutations.ajouter_salutation", "line_number": 12, 
"usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 17, "usage_type": "call"}, {"api_name": "gestion_salutations.visualiser_salutation", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 25, "usage_type": "call"}, {"api_name": "gestion_salutations.afficher_salutations", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.request.json.get", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 35, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 35, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 37, "usage_type": "call"}, {"api_name": "gestion_salutations.modifier_salutation", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 43, "usage_type": "call"}, {"api_name": "gestion_salutations.supprimer_salutation", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "35625037224", "text": "import argparse\nfrom multiprocessing import Pool\n\nimport pandas\n\n\ndef is_tweet_english(text: str):\n raise NotImplementedError\n\n\ndef is_tweet_referring_to_another(text: str):\n if 'https://t.co' in text:\n return True\n else:\n return False\n\n\ndef is_tweet_valid(tweet: (int, str)):\n print('Analyzing tweet #{0}'.format(tweet[0]))\n return is_tweet_english(tweet[1]) and not is_tweet_referring_to_another(tweet[1])\n\n\ndef clean_dataset(dataset_path: str, output_path: str):\n dataset = pandas.read_csv(dataset_path, delimiter=' ', quotechar='|')\n tweets = dataset['Text']\n\n pool = Pool()\n pool.map(is_tweet_valid, enumerate(tweets))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Cleans given dataset from non-english tweets and'\n 'tweets that are referencing to another tweet.')\n parser.add_argument('-d', '--dataset-path', required=True,\n type=str, help='Path to dataset CSV file.')\n parser.add_argument('-o', '--output-path', required=True,\n type=str, help='Path to where a cleaned dataset will be written.')\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n clean_dataset(args.dataset_path, args.output_path)\n\nif __name__ == '__main__':\n main()\n", "repo_name": "m-ciesielski/text-emotion-analysis", "sub_path": "utils/clean_dataset.py", "file_name": "clean_dataset.py", "file_ext": "py", "file_size_in_byte": 1330, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "33", "api": [{"api_name": "pandas.read_csv", "line_number": 24, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 27, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "23675683396", "text": "from urllib.parse import quote, urlsplit\n\nfrom django.conf import settings\nimport requests\nfrom bs4 import BeautifulSoup\n\n\nBASE_URL = \"https://www.allmusic.com\"\n# It looks like the service doesn't return results if the user agent isn't mocked\nUSER_AGENT = \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:109.0) Gecko/20100101 
Firefox/116.0\"\n\n\nclass ScrappingError(Exception):\n pass\n\n\nclass Scrapper:\n def get_artist_image(self, artist_name):\n artist_search = quote(artist_name)\n headers = {\n \"User-Agent\": USER_AGENT,\n }\n search_response = requests.get(\n f\"{BASE_URL}/search/artists/{artist_search}\", headers=headers\n )\n search_response.raise_for_status()\n image_url = SearchPage(search_response.content).get_image_url()\n\n with requests.get(image_url, stream=True) as image_response:\n image_response.raise_for_status()\n image_path = settings.MEDIA_ROOT / artist_name\n image_extension = urlsplit(image_url).path.split(\".\")[-1]\n file_name = f\"{image_path}.{image_extension}\"\n with open(file_name, \"wb\") as f:\n for chunk in image_response.iter_content(chunk_size=8192):\n f.write(chunk)\n\n return file_name\n\n\nclass PageParsingError(Exception):\n pass\n\n\nclass SearchPage:\n def __init__(self, html_doc):\n self.soup = BeautifulSoup(html_doc, \"html.parser\")\n\n def get_image_url(self):\n \"\"\"\n Apply the following heuristics: assume the first result in the search list is the right one\n \"\"\"\n try:\n search_results = self.soup.find(class_=\"search-results\")\n artist_li = search_results.find_all(\"li\")[0]\n photo_div = artist_li.find(class_=\"photo\")\n img = photo_div.find(\"img\")\n return img[\"src\"]\n except:\n raise PageParsingError\n", "repo_name": "manugrandio/artists-api", "sub_path": "artistsapi/artists/utils/artistimagescrapper.py", "file_name": "artistimagescrapper.py", "file_ext": "py", "file_size_in_byte": 1912, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "urllib.parse.quote", "line_number": 19, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 23, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 29, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 31, "usage_type": "name"}, {"api_name": "urllib.parse.urlsplit", "line_number": 32, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "36497197000", "text": "\n\nimport argparse\nimport os\nimport numpy as np\nimport cv2\n\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nfrom tensorflow.contrib.slim import arg_scope\nfrom tensorflow.contrib.slim.python.slim.nets import resnet_v1\nfrom tensorflow.contrib.slim.python.slim.nets import inception_v1\nfrom tensorflow.contrib.slim.python.slim.nets.inception_v1 import inception_v1_base\nfrom tensorflow.contrib.layers.python.layers import layers as layers_lib\nfrom tensorflow.python.ops import variable_scope\nimport time\n'''\nextract feature of test images using resnet pre-train model\n'''\n\n\ndef extract_feature(image_list, pool5, image_holder, preprocess, model_path, image_dir, feat_dir):\n tfconfig = tf.ConfigProto(allow_soft_placement=True)\n tfconfig.gpu_options.allow_growth = True\n sess = tf.Session(config=tfconfig)\n\n init(model_path, sess)\n print('Done Init! 
')\n net_time, cnt = 0, 0\n for i, index in enumerate(image_list):\n feat_name = os.path.join(feat_dir, index.split('.')[0] + '.npz')\n image_name = os.path.join(image_dir, index)\n lockname = feat_name + '.lock'\n if os.path.exists(feat_name):\n continue\n if os.path.exists(lockname):\n continue\n try:\n os.makedirs(lockname)\n except:\n continue\n t = time.time()\n cnt += 1\n\n image = preprocess(image_name) #\n if image is None:\n print('no image')\n continue\n feat = run_feat(sess, pool5, image_holder, image)\n if not os.path.exists(os.path.dirname(feat_name)):\n try:\n os.makedirs(os.path.dirname(feat_name))\n print('## Make Directory: %s' % feat_name)\n except:\n pass\n np.savez_compressed(feat_name, feat=feat)\n net_time += time.time() - t\n if i % 1000 == 0:\n print('extracting feature [%d / %d] %s (%f sec)' % (i, len(image_list), feat_name, net_time / cnt * 1000),\n feat.shape)\n net_time = 0\n cnt = 0\n cmd = 'rm -r %s' % lockname\n os.system(cmd)\n\n\ndef init(model_path, sess):\n def get_variables_in_checkpoint_file(file_name):\n reader = tf.pywrap_tensorflow.NewCheckpointReader(file_name)\n # reader.get_tensor()\n var_to_shape_map = reader.get_variable_to_shape_map()\n return var_to_shape_map, reader\n\n var_keep_dic, reader = get_variables_in_checkpoint_file(model_path)\n my_var_list = tf.global_variables()\n sess.run(tf.variables_initializer(my_var_list, name='init'))\n variables_to_restore = []\n my_dict = {}\n for v in my_var_list:\n name = v.name.split(':')[0]\n my_dict[name] = 0\n if name not in var_keep_dic:\n print('He does not have', name)\n else:\n if v.shape != var_keep_dic[name]:\n print('Does not match shape: ', v.shape, var_keep_dic[name])\n continue\n variables_to_restore.append(v)\n for name in var_keep_dic:\n if name not in my_dict:\n print('I do not have ', name)\n restorer = tf.train.Saver(variables_to_restore)\n restorer.restore(sess, model_path)\n print('Initialized')\n\n# image preprocess\ndef preprocess_res50(image_name):\n _R_MEAN = 123.68\n _G_MEAN = 116.78\n _B_MEAN = 103.94\n image = cv2.imread(image_name)\n if image is None:\n return None\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n target_size = 256\n crop_size = 224\n im_size_min = np.min(image.shape[0:2]) # number of image channels\n im_scale = float(target_size) / float(im_size_min)\n\n # cv2.resize(src, dsize, dst=None, fx=None, fy=None, interpolation=None)\n # input: orignal image (w*h*c); dsize: output size; fx: ; fy: ; interpolation: ;\n image = cv2.resize(image, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)\n height = image.shape[0]\n width = image.shape[1]\n x = int((width - crop_size) / 2)\n y = int((height - crop_size) / 2)\n image = image[y: y + crop_size, x: x + crop_size] # crop image\n\n image = image.astype(np.float32)\n image[:, :, 0] -= _R_MEAN\n image[:, :, 1] -= _G_MEAN\n image[:, :, 2] -= _B_MEAN\n image = image[np.newaxis, :, :, :]\n return image\n\n\n\ndef run_feat(sess, pool5, image_holder, image):\n feat = sess.run(pool5, feed_dict={image_holder: image})\n feat = np.squeeze(feat)\n # exit()\n return feat\n\n\ndef resnet_arg_scope(is_training=True,\n batch_norm_decay=0.997,\n batch_norm_epsilon=1e-5,\n batch_norm_scale=True):\n batch_norm_params = {\n 'is_training': False,\n 'decay': batch_norm_decay,\n 'epsilon': batch_norm_epsilon,\n 'scale': batch_norm_scale,\n 'trainable': False,\n 'updates_collections': tf.GraphKeys.UPDATE_OPS\n }\n with arg_scope(\n [slim.conv2d],\n weights_initializer=slim.variance_scaling_initializer(),\n 
trainable=is_training,\n activation_fn=tf.nn.relu,\n normalizer_fn=slim.batch_norm,\n normalizer_params=batch_norm_params):\n with arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc:\n return arg_sc\n\n\n\n\ndef res50():\n image = tf.placeholder(tf.float32, [None, 224, 224, 3], 'image')\n with slim.arg_scope(resnet_arg_scope(is_training=False)):\n net_conv, end_point = resnet_v1.resnet_v1_50(image, global_pool=True, is_training=False)\n return net_conv, image\n\n\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_root', type=str, default='../../data', help='root directory')\n parser.add_argument('--data_dir', type=str, default='AZSL-G', help='data directory')\n parser.add_argument('--dataset', type=str, default='ImNet_A', help='ImNet_A, AwA')\n parser.add_argument('--gpu', type=str, default='1',\n help='gpu device')\n args = parser.parse_args()\n\n\n DATA_DIR = os.path.join(args.data_root, args.data_dir)\n DATASET = args.dataset\n # pre-train model\n MODEL_PATH = os.path.join(DATA_DIR, 'materials', 'resnet_v1_50.ckpt')\n IMAGE_FILE = os.path.join(DATA_DIR, DATASET, 'test_img_list.txt')\n\n if DATASET == 'ImNet_A':\n Image_DIR = os.path.join(args.data_root, 'images', 'ImNet_A')\n if DATASET == 'AwA':\n Image_DIR = os.path.join(args.data_root, 'images', 'Animals_with_Attributes2/JPEGImages')\n\n SAVE_DIR = os.path.join(DATA_DIR, DATASET, 'Test_DATA_feats')\n\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n\n # load cnn model\n pool5, image_holder = res50()\n preprocess = preprocess_res50\n\n image_list, label_list = [], []\n with open(IMAGE_FILE) as fp:\n for line in fp.readlines():\n index, label = line.split()\n image_list.append(index) # list of images\n label_list.append(int(label))\n\n extract_feature(image_list, pool5, image_holder, preprocess, MODEL_PATH, Image_DIR, SAVE_DIR)\n", "repo_name": "genggengcss/X-ZSL", "sub_path": "AZSL-G/src/extract_img_feats.py", "file_name": "extract_img_feats.py", "file_ext": "py", "file_size_in_byte": 7041, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "33", "api": [{"api_name": "tensorflow.ConfigProto", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 39, "usage_type": "call"}, {"api_name": "time.time", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 50, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "numpy.savez_compressed", "line_number": 56, "usage_type": "call"}, {"api_name": 
"time.time", "line_number": 57, "usage_type": "call"}, {"api_name": "os.system", "line_number": 64, "usage_type": "call"}, {"api_name": "tensorflow.pywrap_tensorflow.NewCheckpointReader", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.pywrap_tensorflow", "line_number": 69, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables", "line_number": 75, "usage_type": "call"}, {"api_name": "tensorflow.variables_initializer", "line_number": 76, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 92, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 92, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 101, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 104, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 104, "usage_type": "attribute"}, {"api_name": "numpy.min", "line_number": 107, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 112, "usage_type": "call"}, {"api_name": "cv2.INTER_LINEAR", "line_number": 112, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 119, "usage_type": "attribute"}, {"api_name": "numpy.newaxis", "line_number": 123, "usage_type": "attribute"}, {"api_name": "numpy.squeeze", "line_number": 130, "usage_type": "call"}, {"api_name": "tensorflow.GraphKeys", "line_number": 145, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.slim.arg_scope", "line_number": 147, "usage_type": "call"}, {"api_name": "tensorflow.contrib.slim.conv2d", "line_number": 148, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.slim", "line_number": 148, "usage_type": "name"}, {"api_name": "tensorflow.contrib.slim.variance_scaling_initializer", "line_number": 149, "usage_type": "call"}, {"api_name": "tensorflow.contrib.slim", "line_number": 149, "usage_type": "name"}, {"api_name": "tensorflow.nn", "line_number": 151, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.slim.batch_norm", "line_number": 152, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.slim", "line_number": 152, "usage_type": "name"}, {"api_name": "tensorflow.contrib.slim.arg_scope", "line_number": 154, "usage_type": "call"}, {"api_name": "tensorflow.contrib.slim.batch_norm", "line_number": 154, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.slim", "line_number": 154, "usage_type": "name"}, {"api_name": "tensorflow.placeholder", "line_number": 161, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 161, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.slim.arg_scope", "line_number": 162, "usage_type": "call"}, {"api_name": "tensorflow.contrib.slim", "line_number": 162, "usage_type": "name"}, {"api_name": "tensorflow.contrib.slim.python.slim.nets.resnet_v1.resnet_v1_50", "line_number": 163, "usage_type": "call"}, {"api_name": "tensorflow.contrib.slim.python.slim.nets.resnet_v1", "line_number": 163, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 180, "usage_type": "call"}, {"api_name": "os.path", "line_number": 180, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 183, "usage_type": "call"}, {"api_name": "os.path", "line_number": 183, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 184, "usage_type": "call"}, {"api_name": "os.path", "line_number": 184, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 
187, "usage_type": "call"}, {"api_name": "os.path", "line_number": 187, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 189, "usage_type": "call"}, {"api_name": "os.path", "line_number": 189, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 191, "usage_type": "call"}, {"api_name": "os.path", "line_number": 191, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 193, "usage_type": "attribute"}]} +{"seq_id": "19646824584", "text": "# https://github.com/skathirmani/data-scraping\n\nfrom tweepy.streaming import StreamListener\nfrom tweepy import OAuthHandler\nfrom tweepy import Stream\nimport json\nimport pandas as pd\nimport os\nimport glob\n\nconsumer_key = \"eVazopG0C6MPuJruABnb9Ojuo\"\nconsumer_secret = \"bRDPy1fV5ROPG5t5bCFtNxyAzbhMqUOz6aLsnhfLpmxUh3SJHs\"\n\naccess_token = \"873805520112459776-R8U5mUVRSs8hnXHYukrvtPe2crmSa7b\"\naccess_token_secret = \"7qq4TUEeimDlV3KfFPDp2gq030ECb7UnckpcLKuN3mD3j\"\n\n\nclass StdOutListener(StreamListener):\n\n def on_data(self, data):\n print('----')\n tweet = json.loads(data)\n tweet_df = pd.DataFrame({\n 'user_name': [tweet['user']['name']],\n 'user_handler': [tweet['user']['screen_name']],\n 'text':[tweet['text']],\n 'created_at':[tweet['created_at']],\n 'user_location':[tweet['user']['location']],\n 'source':[tweet['source']]\n \n })\n file_name = 'tweets_datascience.csv'\n file_cwd = glob.glob('*.csv')\n print(os.getcwd())\n if file_name in file_cwd:\n with open(file_name, 'a') as f:\n print('appending')\n try:\n tweet_df.to_csv(f, index=False, header=False)\n except UnicodeEncodeError: \n pass\n \n else:\n print('creating a file')\n \n try:\n tweet_df.to_csv(file_name,index=False,encoding='utf-8')\n except UnicodeEncodeError:\n pass\n \n \n \n \n \n\n def on_error(self, status):\n print (status)\n\n\nif __name__ == '__main__':\n\n l = StdOutListener()\n auth = OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n stream = Stream(auth, l)\n stream.filter(track=['#TuesdayThoughts'])", "repo_name": "iftikaraliahmed/datascrapingwrangling", "sub_path": "twitter_streaming.py", "file_name": "twitter_streaming.py", "file_ext": "py", "file_size_in_byte": 1901, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "tweepy.streaming.StreamListener", "line_number": 18, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 23, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 33, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 34, "usage_type": "call"}, {"api_name": "tweepy.OAuthHandler", "line_number": 63, "usage_type": "call"}, {"api_name": "tweepy.Stream", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "34919385747", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# retraining.py\n# Created on January 19, 2017.\n\"\"\"\n\"\"\"\nfrom __future__ import print_function\nfrom argparse import ArgumentParser\nimport collections\nimport pickle\nimport shelve\nimport sys\nimport warnings\nimport math \nimport pickle\nimport random\nimport time\nimport sys\nimport multiprocessing\nimport os\n\nimport numpy\nimport pylab\nfrom sklearn import datasets, metrics\nfrom sklearn.ensemble import RandomForestClassifier as RFC\nfrom sklearn.svm import SVC\n#from sklearn.svm import LinearSVC\nfrom sklearn.externals import joblib\nfrom functools import 
partial\n\nMODEL = pickle.load(open(\"path to model\", \"rb\"))\nfeat_hierarchy = pickle.load(open(\"feat_hierarchy.pickle\", \"r\"))\n\n\nL = 3\nLAM = 0.01\nn_seed = 40\nn_window = 3000\nN = 30000\nallplot = []\nallf = []\n\nremove_list = []\nfeat_list = []\nfor i in range(0, 961):\n feat_list.append(i)\n#remove_list = [77 ,94, 95, 96, 108, 235, 334, 604, 605, 606, 5798, 6083]\n#remove_list = [77,94,95,96,235,334,604,607,622,624]\n#remove_list = [77,94,95,96,108,235,334,604,606,607,622,624]\n#remove_list = [65, 80, 81, 82, 94, 265, 346]\nremove_list = []\nfor x in remove_list:\n feat_list.remove(x)\n\n###############################################################################\n# auxiliary functions\n\ndef is_leaf(feat_num):\n if feat_hierarchy[feat_num][2] == 'leaf':\n return True\n else:\n return False\n\ndef is_leaf_parent(feat_num):\n if feat_hierarchy[feat_num][2] == 'leaf_parent':\n return True\n else:\n return False \n\ndef is_other(feat_num):\n if feat_hierarchy[feat_num][2] == 'others':\n return True\n else:\n return False \n\ndef child_leaf_self(feat_num):\n # this function is for leaf-parent features\n # return the leaf children of the giving feature\n child_feat_seq = [feat_num]\n for child in feat_hierarchy[feat_num][1]:\n #if is_leaf(feat_hierarchy[child-1]):\n if feat_hierarchy[child-1][2] == 'leaf':\n child_feat_seq.append(child-1)\n return child_feat_seq\n\ndef parent_self(feat_num):\n parent_feat_seq = [feat_num]\n for parent in feat_hierarchy[feat_num][0]:\n parent_feat_seq.append(parent-1)\n return parent_feat_seq\n\n# The Quadratic cost function\ndef quad_cost(x, xj):\n l = 0;\n for i in range(0, len(x)):\n l = l + (x[i]-xj[i])**2\n a = 1\n #if 73 <= feat <= 77 or 110 <= feat <= 336 or 598 <= feat < 694 or 1507 <= feat <= 1777 or 2666 <= feat < 2769 or 4148 <= feat <= 4247 or 4408 <= feat < 4913 or 5777 <= feat < 5879 or 5901 <= feat < 5990:\n # a = 1\n return 0.5*a*LAM*l\n\n# The exponential cost function\ndef expo_cost(x, xj):\n l = 0\n for i in range(0, len(x)):\n l = l + (x[i] - xj[i])**2\n return math.exp(LAM*math.sqrt(l+1)) \n\n# Define the hypo of the classifier as fx\ndef func(x):\n #model = pickle.load(open(model_path, \"rb\"))\n X = [x]\n y = MODEL.decision_function(X)\n r = list(y)\n return r[0] \n\n# Objective function of attcker\n#def Q(x, xj):\n# return func(x) + quad_cost(x, xj)\n\n# Convert feature vector to libsvm strings\ndef vec2str(x):\n lib_str = \"1\"\n for i in range(0, len(x)):\n if x[i] == 1:\n lib_str = lib_str + \" \"+str(i+1)+\":1\" \n return lib_str\n \n# Flip the i th feature (0 <= i < 6087) \ndef flip(x, i):\n xk = x[:]\n if xk[i] == 1:\n xk[i] = 0\n else:\n xk[i] = 1\n return xk \n\n# Flip with consideration of feature structure\ndef flip_with_struct(x, i):\n xk = x[:]\n if is_other(i):\n if xk[i] == 1:\n xk[i] = 0\n else:\n xk[i] = 1\n if is_leaf(i):\n if xk[i] == 1:\n xk[i] = 0\n else:\n flip_list = parent_self(i)\n for j in flip_list:\n xk[j] = 1\n if is_leaf_parent(i):\n if xk[i] == 1:\n flip_list = child_leaf_self(i)\n for j in flip_list:\n xk[j] = 0\n else:\n xk[i] = 1\n return xk \n\n# Convert a libsvm strings to a feature vector\ndef str2vec(lib_str):\n vec = [0]*961 \n on = 0\n tmp = ''\n for i in range(0, len(lib_str)):\n if lib_str[i] == ':':\n on = 0\n vec[int(tmp)-1] = 1\n tmp = ''\n if on == 1:\n tmp = tmp + lib_str[i]\n if lib_str[i] == ' ':\n on = 1 \n return vec\n \n############################################################################### \n# Coordinated_greedy algorithm\ndef coor_greedy(str_pair, sort):\n # 
First transfer the string to a feature vector\n    xj = str2vec(str_pair[0])\n    opt_pool = []\n    values = []\n    opt_sol = ''\n    # Choose L random starting points\n    for i in range(0, L):\n        xk = xj[:]\n        fk = func(xk)\n        ck = 0\n        Qk = fk + ck\n        n_converge = 0\n\n        while n_converge <= n_window:\n        \n            rand_sel = random.randint(0, len(feat_list)-1)\n            feat = feat_list[rand_sel]\n            \n            #print (feat)\n            #feat = random.randint(0,6086)\n            xl = flip(xk, feat)\n            #xl = flip(xk, feat) \n\n            fl = func(xl)\n            cl = quad_cost(xl, xj)\n            Ql = fl + cl\n            \n            #if Q(xl, xj) < Q(xk, xj):\n            if Ql < Qk:\n                xk = xl[:]\n                n_converge = 0\n                Qk = Ql\n                fk = fl\n                ck = cl\n            else:\n                n_converge += 1\n            #print func(xk)\n            #if func(xk) < 0:\n\n        if fk < 0:\n            opt_pool.append(xk)\n            #values.append(Q(xk, xj))\n            values.append(Qk) \n    if len(opt_pool) > 0:\n        min_index = values.index(min(values))\n        opt_vec = opt_pool[min_index]\n        opt_sol = vec2str(opt_vec)\n        #print func(opt_vec)\n    else:\n        opt_sol = ''\n    return opt_sol \n\n###############################################################################\n# the main function of retraining\n# 1. use coordinated-greedy to generate new instances\n# 2. check new instances\n# 3. train the new dataset \ndef main():\n    # CG feature pool score\n    f = open('/home/tongl/fe/score.txt','r')\n    score = f.readlines()\n    f.close()\n    for i in range(0, len(score)):\n        score[i] = int(score[i])\n    sorted_score = sorted(range(len(score)), key=lambda k: -1*score[k]) \n\n    # import the retraining seeds\n    f = open('/****path to directory****/data/Hidost/Hidost-retr-seed.libsvm','r')\n    line_all = f.readlines()\n    f.close()\n    seeds = line_all[0:n_seed]\n    \n    # import the retraining targets\n    f = open('/****path to directory****/data/Hidost/Hidost-train.libsvm','r')\n    line_all = f.readlines()\n    f.close()\n    targets = []\n    for i in range(0, n_seed):\n        seq = random.randint(5586, 10081)\n        targets.append(line_all[seq])\n\n    inputs = []\n    for i in range(0, n_seed):\n        inputs.append([seeds[i], targets[i]])\n    #print (inputs[0][0])\n    #print (inputs[0][1])\n\n    i = 1\n    while True:\n        # use CG to generate adversarial instances with multiprocessing\n        \n        print (\"#####################################################\")\n        print ('Iteration', i)\n        start_time = time.time()\n        cores = multiprocessing.cpu_count()\n        adv_ins = []\n        pool = multiprocessing.Pool(processes=cores)\n        sum_num = 0.0\n        partial_cg = partial(coor_greedy, sort = sorted_score)\n        for y in pool.imap(partial_cg, inputs):\n            #print (func(str2vec(y))) \n            if y != \"\":\n                adv_ins.append(y+'\\n')\n                sum_num += func(str2vec(y))\n        pool.close()\n        pool.join()\n        #adv_ins[-1] = adv_ins[-1].strip() \n        print ('Multiple process:', time.time() - start_time, 's')\n        if len(adv_ins) != 0:\n            print ('Average value:', sum_num/len(adv_ins))\n        print ('The number of instances added:', len(adv_ins))\n        \n        # check the new instances\n        if len(adv_ins) != 0:\n            ins_path = \"/****path to directory****/data/Hidost/M40L001/adv_ins_\"+str(i)+\".libsvm\"\n            ins_add = open(ins_path, 'w')\n            for ins in adv_ins:\n                ins_add.write(\"%s\" % ins)\n        else:\n            print(\"#####################################################\")\n            print(\"The retraining is terminated at iteration %d\" % i) \n            break\n        \n        # train with the new dataset \n        # copy the old training set\n        training_pre = '/****path to directory****/data/Hidost/M40L001/train-'+str(i-1)+'.libsvm'\n        training_cur = '/****path to directory****/data/Hidost/M40L001/train-'+str(i)+'.libsvm'\n        cmd = 'cp' + ' ' + training_pre + ' ' + training_cur\n        os.system(cmd)\n        # add the adversarial instance in the new 
training set\n f = open(training_cur, 'a')\n for ins in adv_ins:\n f.write(ins)\n f.close()\n \n train_fs = ['/****path to directory****/data/Hidost/M40L001/train-'+str(i)+'.libsvm']\n test_fs = ['/****path to directory****/data/Hidost/Hidost-test.libsvm']\n \n print('Performing experiment')\n #print('train_fs: %s' % train_fs)\n #print('test_fs: %s' % test_fs)\n for w, (f_tr, f_te) in enumerate(zip(train_fs, test_fs), start=1):\n # Load test dates\n \n #dates = numpy.array(load_dates(f_te))\n #week_s, week_e = dates.min(), dates.max()\n #key_dates.append(week_s)\n #print('\\nPeriod {} [{} - {}]'.format(w, week_s, week_e))\n \n # Load training data\n #print('f_tr: %s' % f_tr)\n #print('f_te: %s' % f_te)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n #X_tr, y_tr = datasets.load_flight_file(f_tr)\n X_tr, y_tr = datasets.load_svmlight_file(f_tr, n_features=961)\n \n print(X_tr.shape)\n X_tr.data = numpy.ones_like(X_tr.data)\n X_tr = X_tr.toarray()\n clf = SVC(kernel='rbf', gamma=0.0025, C=12)\n sample_weight = None\n clf.fit(X_tr, y_tr, sample_weight=sample_weight)\n pickle.dump(clf, open(\"/****path to directory****/exper/Hidost/M40L001/model-\"+str(i)+\".pickle\", 'wb+'))\n global MODEL\n MODEL = pickle.load(open(\"/****path to directory****/exper/Hidost/M40L001/model-\"+str(i)+\".pickle\", \"rb\"))\n i += 1\n \nif __name__ == \"__main__\":\n main()\n \n", "repo_name": "vu-aml/conserved-feature", "sub_path": "src/feature_space_retraining/FSR.py", "file_name": "FSR.py", "file_ext": "py", "file_size_in_byte": 10553, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "33", "api": [{"api_name": "pickle.load", "line_number": 31, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 32, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 107, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 107, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 196, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 259, "usage_type": "call"}, {"api_name": "time.time", "line_number": 274, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 275, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 277, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 279, "usage_type": "call"}, {"api_name": "time.time", "line_number": 288, "usage_type": "call"}, {"api_name": "os.system", "line_number": 309, "usage_type": "call"}, {"api_name": "warnings.catch_warnings", "line_number": 333, "usage_type": "call"}, {"api_name": "warnings.simplefilter", "line_number": 334, "usage_type": "call"}, {"api_name": "sklearn.datasets.load_svmlight_file", "line_number": 336, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 336, "usage_type": "name"}, {"api_name": "numpy.ones_like", "line_number": 339, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 341, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 344, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 346, "usage_type": "call"}]} +{"seq_id": "29618278064", "text": "from antares.apps.core.middleware.request import get_request\nimport logging\n\nfrom ckeditor.fields import RichTextField\nfrom django.conf import settings\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext as _\n\nfrom ..constants import TransactionEffectType\n\n\nlogger = 
logging.getLogger(__name__)\n\n\nclass TransactionType(models.Model):\n id = models.SlugField(primary_key=True, max_length=40)\n inverse_transaction_type = models.ForeignKey(\n 'self',\n on_delete=models.PROTECT,\n db_column='inverse_transaction_type',\n blank=True,\n null=True)\n active = models.BooleanField(default=True)\n calculate_charges = models.BooleanField(default=True)\n description = RichTextField(blank=True, null=True)\n effect = models.CharField(choices=TransactionEffectType.choices, max_length=6)\n transaction_type_name = models.CharField(max_length=100)\n post_zeros = models.BooleanField(default=True)\n hrn_script = models.TextField(blank=True, null=True)\n author = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n on_delete=models.PROTECT,\n blank=True,\n null=True,\n editable=False)\n creation_date = models.DateTimeField(null=True, editable=False)\n update_date = models.DateTimeField(null=True, editable=False)\n\n def save(self, *args, **kwargs):\n if self.creation_date is None:\n self.creation_date = timezone.now()\n self.update_date = timezone.now()\n self.author = get_request().user\n super(TransactionType, self).save(*args, **kwargs)\n\n def __str__(self):\n return self.id\n\n class Meta:\n app_label = 'accounting'\n db_table = 'acc_transaction_type'\n verbose_name = _(__name__ + \".table_name\")\n verbose_name_plural = _(__name__ + \".table_name_plural\")\n", "repo_name": "SurferTank/antares-apps", "sub_path": "antares/apps/accounting/models/transaction_type.py", "file_name": "transaction_type.py", "file_ext": "py", "file_size_in_byte": 1866, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "33", "api": [{"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.SlugField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.PROTECT", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "ckeditor.fields.RichTextField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "constants.TransactionEffectType.choices", "line_number": 27, "usage_type": "attribute"}, {"api_name": "constants.TransactionEffectType", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}, {"api_name": 
"django.db.models.TextField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 31, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 31, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 32, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 32, "usage_type": "name"}, {"api_name": "django.db.models.PROTECT", "line_number": 33, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 37, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 37, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 38, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 38, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 42, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 42, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 43, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 43, "usage_type": "name"}, {"api_name": "antares.apps.core.middleware.request.get_request", "line_number": 44, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 53, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "14113594106", "text": "import MySQLdb\n\nfrom app.database.database import DatabaseConnection\n\nconnection = DatabaseConnection()\ndb, cursor = connection.db, connection.cursor\n\n\ndef get_quotes_for_date(quote_date):\n \"\"\"\n Get all quotes for a particular IRC chat date from the database\n :param quote_date: A string representing the date in the format YYYY-MM-DD\n :return: A hash set containing all quotes for a particular date\n \"\"\"\n quotes_set = set()\n\n try:\n cursor.execute(\n u\"SELECT quote FROM GNUeSummaryParaQuotes \"\n u\"WHERE quote_date=%s\", (quote_date,)\n )\n para_quotes = cursor.fetchall()\n\n for (quote,) in para_quotes:\n quotes = quote.split('-')\n for q in quotes:\n q = q.strip()\n q = q.replace('\\n', '')\n q = ' '.join(q.split())\n quotes_set.add(q)\n\n # print(quotes_set)\n\n except MySQLdb.Error as error:\n print(\"ERROR: {}\".format(error))\n db.rollback()\n\n return quotes_set\n\n\n# get_quotes_for_date(\"2002-04-10\")\n\ndef get_log_ids_of_quoted_logs(date_of_log):\n quotes_set = get_quotes_for_date(date_of_log)\n quoted_log_ids = []\n\n try:\n cursor.execute(\n u\"SELECT log_id, line_message FROM GNUeIRCLogs \"\n u\"WHERE date_of_log=%s\", (date_of_log,)\n )\n for log_id, line_message in cursor.fetchall():\n line_message = line_message.strip()\n line_message = line_message.replace('\\n', '')\n line_message = ' '.join(line_message.split())\n if line_message in quotes_set:\n quoted_log_ids.append(int(log_id))\n # print log_id, line_message\n\n except MySQLdb.Error as error:\n print(\"ERROR: {}\".format(error))\n db.rollback()\n return quoted_log_ids\n\n\ndef mark_quoted_logs_as_summary_per_date(date_of_log):\n log_ids = get_log_ids_of_quoted_logs(date_of_log)\n format_ids = ','.join(['%s'] * len(log_ids))\n log_ids.append(date_of_log)\n print(date_of_log)\n if len(log_ids) > 1:\n try:\n cursor.execute(\n u\"UPDATE GNUeIRCLogs SET is_summary=1 \"\n u\"WHERE log_id IN ({}) 
\"\n u\"AND date_of_log=%s\".format(format_ids), tuple(log_ids)\n )\n\n except MySQLdb.Error as error:\n print(\"ERROR: {}\".format(error))\n else:\n print(\"No quotes found for: \", date_of_log)\n\n\ndef mark_all_quoted_logs_as_summary():\n try:\n cursor.execute(\n u\"SELECT date_of_log FROM GNUeIRCLogs \"\n u\"GROUP BY date_of_log ORDER BY date_of_log\"\n )\n # print list(cursor)\n for (date_of_log,) in cursor.fetchall():\n if \"2001-10-23\" <= date_of_log <= \"2006-09-21\":\n # print date_of_log\n print('Processing Log: ', date_of_log)\n mark_quoted_logs_as_summary_per_date(date_of_log)\n\n except MySQLdb.Error as error:\n print(\"ERROR: {}\".format(error))\n db.rollback()\n\n\nmark_all_quoted_logs_as_summary()\n# mark_quoted_logs_as_summary_per_date(\"2001-10-23\")\n\n# disconnect from server\ndb.close()\n", "repo_name": "wcyn/gnue-chat-summarization", "sub_path": "data_preprocessing/mark_summary.py", "file_name": "mark_summary.py", "file_ext": "py", "file_size_in_byte": 3156, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "app.database.database.DatabaseConnection", "line_number": 5, "usage_type": "call"}, {"api_name": "MySQLdb.Error", "line_number": 34, "usage_type": "attribute"}, {"api_name": "MySQLdb.Error", "line_number": 60, "usage_type": "attribute"}, {"api_name": "MySQLdb.Error", "line_number": 79, "usage_type": "attribute"}, {"api_name": "MySQLdb.Error", "line_number": 98, "usage_type": "attribute"}]} +{"seq_id": "1491288539", "text": "# -*- coding: utf8 -*-\n#\nimport json\nfrom typing import List, Dict\n\nimport nltk\nimport torch\nfrom torch.nn.utils.rnn import pad_sequence\nfrom torch.utils.data import dataset, dataloader\nfrom tqdm import tqdm\nfrom transformers import AutoTokenizer\n\nfrom src.algo import Tree\nfrom src.config import DATA_PATH, TRAIN_PATH, DEV_PATH, WORD_COUNT\n\n\ndef get_labels() -> Dict:\n label_map_path = DATA_PATH.joinpath('company_data').joinpath('label_map.json')\n if label_map_path.exists():\n with open(label_map_path, 'r', encoding='utf-8') as f:\n return json.loads(f.read())\n\n def _get_label(file):\n labels = {}\n with open(file, 'r', encoding='utf-8') as f:\n for line in tqdm(f, desc='get labels'):\n tree = nltk.Tree.fromstring(line)\n for i, j, label in Tree.factorize(Tree.binarize(tree)[0]):\n labels.setdefault(label, 0)\n labels[label] += 1\n return labels\n\n label1 = _get_label(TRAIN_PATH)\n label2 = _get_label(DEV_PATH)\n final_label = {'[PAD]': 0}\n for label in label1:\n final_label.setdefault(label, len(final_label))\n for label in label2:\n final_label.setdefault(label, len(final_label))\n with open(label_map_path, 'w', encoding='utf-8') as f:\n f.write(json.dumps(final_label, ensure_ascii=False, indent=2))\n return final_label\n\n\ndef get_tags() -> Dict:\n pos_map_path = DATA_PATH.joinpath('company_data').joinpath('pos_map.json')\n if pos_map_path.exists():\n with open(pos_map_path, 'r', encoding='utf-8') as f:\n return json.loads(f.read())\n\n def _get_pos(file):\n pos = {}\n with open(file, 'r', encoding='utf-8') as f:\n for line in tqdm(f, desc='get tags'):\n tree = nltk.Tree.fromstring(line)\n words, tags = zip(*tree.pos())\n for tag in tags:\n pos.setdefault(tag, 0)\n return pos\n pos1 = _get_pos(TRAIN_PATH)\n pos2 = _get_pos(DEV_PATH)\n final_pos = {'[PAD]': 0}\n\n for _pos in pos1:\n final_pos.setdefault(_pos, len(final_pos))\n for _pos in pos2:\n final_pos.setdefault(_pos, len(final_pos))\n with open(pos_map_path, 'w', encoding='utf-8') as 
f:\n        f.write(json.dumps(final_pos, ensure_ascii=False, indent=2))\n    return final_pos\n\n\ndef encoder_texts(texts: List[List[str]], tokenizer):\n    # Compute the maximum word length over all sentences\n    fix_len = max([max([len(word) for word in text]) for text in texts])\n\n    matrix = []\n    for text in texts:\n        vector = []\n\n        text = [tokenizer.cls_token, *text, tokenizer.sep_token]\n        input_ids = tokenizer.batch_encode_plus(\n            text,\n            add_special_tokens=False,\n        )['input_ids']\n\n        for _input_ids in input_ids:\n            # Fix cases like: texts = [['\ue5f1\ue5f1\ue5f1\ue5f1']]\n            _input_ids = _input_ids or [tokenizer.unk_token_id]\n            vector.append(_input_ids + (fix_len - len(_input_ids)) * [tokenizer.pad_token_id])\n        matrix.append(torch.tensor(vector, dtype=torch.long))\n    return pad_sequence(matrix, batch_first=True)\n\n\nclass ConTransform(dataset.Dataset):\n    def __init__(self, path: str, transformer: str, device: torch.device = 'cpu'):\n        super(ConTransform, self).__init__()\n        self.device = device\n\n        self.trees = []\n        with open(path, 'r', encoding='utf-8') as f:\n            for line in tqdm(f, desc='transform'):\n                tree = nltk.Tree.fromstring(line)\n                # Not enough GPU memory for longer sentences\n                if len(tree.pos()) > WORD_COUNT:\n                    continue\n                self.trees.append(tree)\n        self.trees = sorted(self.trees, key=lambda x: len(x.pos()))\n        self.tokenizer = AutoTokenizer.from_pretrained(transformer) if isinstance(transformer, str) else transformer\n\n        self.labels = get_labels()\n        self.tags = get_tags()\n\n    def __len__(self):\n        return len(self.trees)\n\n    def __getitem__(self, item):\n        tree = self.trees[item]\n        words, tags = zip(*tree.pos())\n        # Why prepend 0: because charts[:, 0,0] are all -1\n        tag_ids = [0] + [self.tags[tag] for tag in tags]\n        chart = [[-1] * (len(words) + 1) for _ in range(len(words) + 1)]\n        for i, j, label in Tree.factorize(Tree.binarize(tree)[0]):\n            chart[i][j] = self.labels[label]\n        return words, torch.tensor(tag_ids, dtype=torch.long), tree, torch.tensor(chart, dtype=torch.long)\n\n    def to_dataloader(self, batch_size, shuffle):\n        return dataloader.DataLoader(self, batch_size=batch_size, shuffle=shuffle, collate_fn=self.collate_fn)\n\n    def collate_fn(self, batch):\n        words = encoder_texts([i[0] for i in batch], tokenizer=self.tokenizer)\n        tags = pad_sequence([i[1] for i in batch], batch_first=True)\n        trees = [i[2] for i in batch]\n        charts = [i[3] for i in batch]\n        max_chart_len = max([i.size(0) for i in charts])\n\n        charts_matrix = torch.zeros(size=(len(charts), max_chart_len, max_chart_len), dtype=torch.long)\n        for i, chart in enumerate(charts):\n            l = chart.size(0)\n            charts_matrix[i, :l, :l] = chart\n\n        return words.to(self.device), tags.to(self.device), trees, charts_matrix.to(self.device)\n\n\n\nif __name__ == '__main__':\n    # texts = [['\ue5f1\ue5f1\ue5f1\ue5f1']]\n    # encoder_texts(\n    #     texts,\n    #     AutoTokenizer.from_pretrained(\n    #         'hfl/chinese-electra-180g-small-discriminator'\n    #     )\n    # )\n    t = AutoTokenizer.from_pretrained(\n        'hfl/chinese-electra-180g-small-discriminator'\n    )\n    for i in ConTransform(path='/home/yuzhang/PycharmProjects/con-parser/src/data/company_data/train.txt', transformer=t).to_dataloader(32, False):\n        print(i)\n", "repo_name": "geasyheart/con-parser", "sub_path": "src/transform.py", "file_name": "transform.py", "file_ext": "py", "file_size_in_byte": 5820, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "41", "api": [{"api_name": "src.config.DATA_PATH.joinpath", "line_number": 18, "usage_type": "call"}, {"api_name": "src.config.DATA_PATH", "line_number": 18, "usage_type": "name"}, {"api_name": "json.loads", 
"line_number": 21, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 26, "usage_type": "call"}, {"api_name": "nltk.Tree.fromstring", "line_number": 27, "usage_type": "call"}, {"api_name": "nltk.Tree", "line_number": 27, "usage_type": "attribute"}, {"api_name": "src.algo.Tree.factorize", "line_number": 28, "usage_type": "call"}, {"api_name": "src.algo.Tree", "line_number": 28, "usage_type": "name"}, {"api_name": "src.algo.Tree.binarize", "line_number": 28, "usage_type": "call"}, {"api_name": "src.config.TRAIN_PATH", "line_number": 33, "usage_type": "argument"}, {"api_name": "src.config.DEV_PATH", "line_number": 34, "usage_type": "argument"}, {"api_name": "json.dumps", "line_number": 41, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 17, "usage_type": "name"}, {"api_name": "src.config.DATA_PATH.joinpath", "line_number": 46, "usage_type": "call"}, {"api_name": "src.config.DATA_PATH", "line_number": 46, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 49, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 54, "usage_type": "call"}, {"api_name": "nltk.Tree.fromstring", "line_number": 55, "usage_type": "call"}, {"api_name": "nltk.Tree", "line_number": 55, "usage_type": "attribute"}, {"api_name": "src.config.TRAIN_PATH", "line_number": 60, "usage_type": "argument"}, {"api_name": "src.config.DEV_PATH", "line_number": 61, "usage_type": "argument"}, {"api_name": "json.dumps", "line_number": 69, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 45, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 73, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 91, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.rnn.pad_sequence", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.utils.data.dataset.Dataset", "line_number": 95, "usage_type": "attribute"}, {"api_name": "torch.utils.data.dataset", "line_number": 95, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 96, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 102, "usage_type": "call"}, {"api_name": "nltk.Tree.fromstring", "line_number": 103, "usage_type": "call"}, {"api_name": "nltk.Tree", "line_number": 103, "usage_type": "attribute"}, {"api_name": "src.config.WORD_COUNT", "line_number": 105, "usage_type": "name"}, {"api_name": "transformers.AutoTokenizer.from_pretrained", "line_number": 109, "usage_type": "call"}, {"api_name": "transformers.AutoTokenizer", "line_number": 109, "usage_type": "name"}, {"api_name": "src.algo.Tree.factorize", "line_number": 123, "usage_type": "call"}, {"api_name": "src.algo.Tree", "line_number": 123, "usage_type": "name"}, {"api_name": "src.algo.Tree.binarize", "line_number": 123, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 125, "usage_type": "attribute"}, {"api_name": "torch.utils.data.dataloader.DataLoader", "line_number": 128, "usage_type": "call"}, {"api_name": "torch.utils.data.dataloader", "line_number": 128, "usage_type": "name"}, {"api_name": "torch.nn.utils.rnn.pad_sequence", "line_number": 132, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 137, "usage_type": "attribute"}, {"api_name": "transformers.AutoTokenizer.from_pretrained", "line_number": 154, "usage_type": "call"}, {"api_name": 
"transformers.AutoTokenizer", "line_number": 154, "usage_type": "name"}]} +{"seq_id": "9384972493", "text": "# lession1_linear_regression.py\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import Imputer\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import MinMaxScaler\n\ndef show_data_summary(input_data):\n print(\"Describe Data:\")\n print(input_data.describe())\n\n print(\"\\nFirst 10 rows:\")\n print(input_data.head(10))\n print(\"....\")\n\ndef data_hist(input_data):\n input_data.hist(bins=100, figsize=(20, 12))\n plt.show()\n\ndef data_scatter(input_data):\n input_data.plot(kind=\"scatter\", x=\"longitude\", y=\"latitude\", alpha=0.1)\n plt.show()\n\ndef permutation_split(data, ratio):\n permutation = np.random.permutation(len(data))\n train_size = int(len(data) * (1 - ratio))\n train_index = permutation[:train_size]\n test_index = permutation[train_size:]\n return data.iloc[train_index], data.iloc[test_index]\n\ndef encode_label(data):\n encoder = LabelEncoder()\n data[\"ocean_proximity\"] = encoder.fit_transform(data[\"ocean_proximity\"])\n\ndef imputer_by_median(data):\n imputer = Imputer(strategy=\"median\")\n X = imputer.fit_transform(data)\n return pd.DataFrame(X, columns=data.columns)\n\ndef scale_data(data):\n scalar = MinMaxScaler(feature_range=(0, 100), copy=False)\n scalar.fit_transform(data)\n\ndef compare_scale_data(origin, scaled):\n plt.subplot(2, 1, 1)\n plt.scatter(x=origin[\"longitude\"], y=origin[\"latitude\"],\n c=origin[\"median_house_value\"], cmap=\"viridis\", alpha=0.1)\n plt.subplot(2, 1, 2)\n plt.scatter(x=scaled[\"longitude\"], y=scaled[\"latitude\"],\n c=origin[\"median_house_value\"], cmap=\"viridis\", alpha=0.1)\n plt.show()\n\ndef show_predict_result(test_data, test_value, predict_value):\n ax = plt.subplot(221)\n plt.scatter(x=test_data[\"longitude\"], y=test_data[\"latitude\"],\n s=test_value, c=\"dodgerblue\", alpha=0.5)\n plt.subplot(222)\n plt.hist(test_value, color=\"dodgerblue\")\n\n plt.subplot(223)\n plt.scatter(x=test_data[\"longitude\"], y=test_data[\"latitude\"],\n s=predict_value, c=\"lightseagreen\", alpha=0.5)\n plt.subplot(224)\n plt.hist(predict_value, color=\"lightseagreen\")\n\n plt.show()\n\ndef split_house_value(data):\n value = data[\"median_house_value\"].copy()\n return data.drop([\"median_house_value\"], axis=1), value\n\ndef MES_evaluation(test_value, predict_value):\n mse = mean_squared_error(test_value, predict_value)\n return np.sqrt(mse)\n\nif __name__ == \"__main__\":\n input_data = pd.read_csv(\"./data/housing.csv\")\n # show_data_summary(input_data)\n # data_scatter(input_data)\n\n encode_label(input_data)\n input_data = imputer_by_median(input_data)\n # show_data_summary(input_data)\n\n scale_data(input_data)\n # compare_scale_data(pd.read_csv(\"./data/housing.csv\"), input_data)\n\n train_set, test_set = train_test_split(input_data,\n test_size=0.1, random_state=59)\n train_data, train_value = split_house_value(train_set)\n test_data, test_value = split_house_value(test_set)\n #show_data_summary(train_data)\n\n linear_reg = LinearRegression()\n linear_reg.fit(train_data, train_value)\n\n predict_value = linear_reg.predict(test_data)\n # print(\"Diff: {}\".format(MES_evaluation(test_value, 
predict_value)))\n\n scores = cross_val_score(linear_reg, train_data, train_value, cv=10)\n print(\"cross_val_score: {}\".format(scores))\n\n", "repo_name": "paulQuei/sklearn_tutorial", "sub_path": "lession1_linear_regression.py", "file_name": "lession1_linear_regression.py", "file_ext": "py", "file_size_in_byte": 3614, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "41", "api": [{"api_name": "matplotlib.pyplot.show", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "numpy.random.permutation", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 33, "usage_type": "attribute"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 40, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.Imputer", "line_number": 44, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 46, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "sklearn.metrics.mean_squared_error", 
"line_number": 81, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 82, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 85, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 96, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 102, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 108, "usage_type": "call"}]} +{"seq_id": "16652343479", "text": "from sciwing.utils.vis_seq_tags import VisTagging\nfrom sciwing.modules.embedders.trainable_word_embedder import TrainableWordEmbedder\nfrom sciwing.modules.embedders.char_embedder import CharEmbedder\nfrom sciwing.modules.embedders.bow_elmo_embedder import BowElmoEmbedder\nfrom sciwing.modules.embedders.concat_embedders import ConcatEmbedders\nfrom sciwing.modules.lstm2seqencoder import Lstm2SeqEncoder\nfrom sciwing.models.rnn_seq_crf_tagger import RnnSeqCrfTagger\nfrom sciwing.datasets.seq_labeling.seq_labelling_dataset import (\n SeqLabellingDatasetManager,\n)\nfrom sciwing.infer.seq_label_inference.seq_label_inference import (\n SequenceLabellingInference,\n)\nfrom sciwing.cli.sciwing_interact import SciWINGInteract\nfrom sciwing.utils.common import cached_path\nimport sciwing.constants as constants\nimport pathlib\nimport json\nimport torch.nn as nn\nimport wasabi\nfrom typing import List\nfrom collections import defaultdict\nimport torch\nfrom typing import Optional, Tuple\n\nPATHS = constants.PATHS\nMODELS_CACHE_DIR = PATHS[\"MODELS_CACHE_DIR\"]\nDATA_DIR = PATHS[\"DATA_DIR\"]\nDATA_FILE_URLS = constants.DATA_FILE_URLS\n\n\nclass NeuralParscit(nn.Module):\n \"\"\" It defines a neural parscit model. The model is used for citation string parsing. This model\n helps you use a pre-trained model who architecture is fixed and is trained by SciWING.\n You can also fine-tune the model on your own dataset.\n\n For practitioners, we provide ways to obtain results quickly from a set of citations\n stored in a file or from a string. 
If you want to see the demo head over to our demo site.\n\n \"\"\"\n\n def __init__(self, device: Optional[Tuple[torch.device, int]] = -1):\n super(NeuralParscit, self).__init__()\n\n if isinstance(device, torch.device):\n self.device = device\n elif isinstance(device, int):\n if device == -1:\n device_string = \"cpu\"\n else:\n device_string = f\"cuda:{device}\"\n self.device = torch.device(device_string)\n else:\n raise ValueError(\n f\"Pass the device number or the device object from Pytorch\"\n )\n\n self.models_cache_dir = pathlib.Path(MODELS_CACHE_DIR)\n self.final_model_dir = self.models_cache_dir.joinpath(\"lstm_crf_parscit_final\")\n if not self.models_cache_dir.is_dir():\n self.models_cache_dir.mkdir(parents=True)\n self.model_filepath = self.final_model_dir.joinpath(\"best_model.pt\")\n self.data_dir = pathlib.Path(DATA_DIR)\n\n if not self.data_dir.is_dir():\n self.data_dir.mkdir(parents=True)\n\n self.train_data_file_url = DATA_FILE_URLS[\"PARSCIT_TRAIN\"]\n self.dev_data_file_url = DATA_FILE_URLS[\"PARSCIT_DEV\"]\n self.test_data_file_url = DATA_FILE_URLS[\"PARSCIT_TEST\"]\n self.msg_printer = wasabi.Printer()\n self._download_if_required()\n self.hparams = self._get_hparams()\n self.data_manager = self._get_data()\n self.model: nn.Module = self._get_model()\n self.infer = self._get_infer_client()\n self.vis_tagger = VisTagging()\n self.interact_ = SciWINGInteract(self.infer)\n\n def _get_model(self) -> nn.Module:\n word_embedder = TrainableWordEmbedder(\n embedding_type=self.hparams.get(\"emb_type\"),\n datasets_manager=self.data_manager,\n device=self.device,\n )\n\n char_embedder = CharEmbedder(\n char_embedding_dimension=self.hparams.get(\"char_emb_dim\"),\n hidden_dimension=self.hparams.get(\"char_encoder_hidden_dim\"),\n datasets_manager=self.data_manager,\n device=self.device,\n )\n\n elmo_embedder = BowElmoEmbedder(\n datasets_manager=self.data_manager,\n layer_aggregation=\"sum\",\n device=self.device,\n )\n\n embedder = ConcatEmbedders([word_embedder, char_embedder, elmo_embedder])\n\n lstm2seqencoder = Lstm2SeqEncoder(\n embedder=embedder,\n hidden_dim=self.hparams.get(\"hidden_dim\"),\n bidirectional=self.hparams.get(\"bidirectional\"),\n combine_strategy=self.hparams.get(\"combine_strategy\"),\n rnn_bias=True,\n dropout_value=self.hparams.get(\"lstm2seq_dropout\", 0.0),\n add_projection_layer=False,\n device=self.device,\n )\n model = RnnSeqCrfTagger(\n rnn2seqencoder=lstm2seqencoder,\n encoding_dim=2 * self.hparams.get(\"hidden_dim\")\n if self.hparams.get(\"bidirectional\")\n and self.hparams.get(\"combine_strategy\") == \"concat\"\n else self.hparams.get(\"hidden_dim\"),\n datasets_manager=self.data_manager,\n device=self.device,\n )\n\n return model\n\n def _get_infer_client(self):\n infer_client = SequenceLabellingInference(\n model=self.model,\n model_filepath=self.final_model_dir.joinpath(\"best_model.pt\"),\n datasets_manager=self.data_manager,\n device=self.device,\n )\n return infer_client\n\n def _predict(self, line: str):\n predictions = self.infer.on_user_input(line=line)\n return predictions\n\n def predict_for_file(self, filename: str) -> List[str]:\n \"\"\" Parse the references in a file where every line is a reference\n\n Parameters\n ----------\n filename : str\n The filename where the references are stored\n\n Returns\n -------\n List[str]\n A list of parsed tags\n\n \"\"\"\n predictions = defaultdict(list)\n with open(filename, \"r\") as fp:\n for line_idx, line in enumerate(fp):\n line = line.strip()\n pred_ = self._predict(line=line)\n for 
namespace, prediction in pred_.items():\n predictions[namespace].append(prediction[0])\n stylized_string = self.vis_tagger.visualize_tokens(\n text=line.split(), labels=prediction[0].split()\n )\n self.msg_printer.divider(\n f\"Predictions for Line: {line_idx+1} from {filename}\"\n )\n print(stylized_string)\n print(\"\\n\")\n\n return predictions[self.data_manager.label_namespaces[0]]\n\n def predict_for_text(self, text: str, show=True) -> str:\n \"\"\" Parse the citation string for the given text\n\n Parameters\n ----------\n text : str\n reference string to parse\n show : bool\n If `True`, then we print the stylized string - where the stylized string provides\n different colors for different tags\n If `False` - then we do not print the stylized string\n\n Returns\n -------\n str\n The parsed citation string\n\n \"\"\"\n predictions = self._predict(line=text)\n for namespace, prediction in predictions.items():\n if show:\n self.msg_printer.divider(f\"Prediction for {namespace.upper()}\")\n stylized_string = self.vis_tagger.visualize_tokens(\n text=text.split(), labels=prediction[0].split()\n )\n print(stylized_string)\n return prediction[0]\n\n def _get_data(self):\n data_manager = SeqLabellingDatasetManager(\n train_filename=cached_path(\n path=self.data_dir.joinpath(\"parscit.train\"),\n url=self.train_data_file_url,\n unzip=False,\n ),\n dev_filename=cached_path(\n path=self.data_dir.joinpath(\"parscit.dev\"),\n url=self.dev_data_file_url,\n unzip=False,\n ),\n test_filename=cached_path(\n path=self.data_dir.joinpath(\"parscit.test\"),\n url=self.test_data_file_url,\n unzip=False,\n ),\n )\n return data_manager\n\n def _get_hparams(self):\n with open(self.final_model_dir.joinpath(\"hyperparams.json\")) as fp:\n hyperparams = json.load(fp)\n return hyperparams\n\n def _download_if_required(self):\n # download the model weights and data to client machine\n cached_path(\n path=f\"{self.final_model_dir}.zip\",\n url=\"https://parsect-models.s3-ap-southeast-1.amazonaws.com/lstm_crf_parscit_final.zip\",\n unzip=True,\n )\n\n def interact(self):\n \"\"\" Interact with the pretrained model\n You can also interact from command line using `sciwing interact neural-parscit`\n \"\"\"\n self.interact_.interact()\n\n\nif __name__ == \"__main__\":\n neural_parscit = NeuralParscit(device=0)\n", "repo_name": "abhinavkashyap/sciwing", "sub_path": "sciwing/models/neural_parscit.py", "file_name": "neural_parscit.py", "file_ext": "py", "file_size_in_byte": 8766, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 57, "dataset": "github-code", "pt": "33", "api": [{"api_name": "sciwing.constants.PATHS", "line_number": 26, "usage_type": "attribute"}, {"api_name": "sciwing.constants", "line_number": 26, "usage_type": "name"}, {"api_name": "sciwing.constants.DATA_FILE_URLS", "line_number": 29, "usage_type": "attribute"}, {"api_name": "sciwing.constants", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 32, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 42, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 42, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 45, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 52, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 58, "usage_type": "call"}, {"api_name": 
"pathlib.Path", "line_number": 63, "usage_type": "call"}, {"api_name": "wasabi.Printer", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 75, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 75, "usage_type": "name"}, {"api_name": "sciwing.utils.vis_seq_tags.VisTagging", "line_number": 77, "usage_type": "call"}, {"api_name": "sciwing.cli.sciwing_interact.SciWINGInteract", "line_number": 78, "usage_type": "call"}, {"api_name": "sciwing.modules.embedders.trainable_word_embedder.TrainableWordEmbedder", "line_number": 81, "usage_type": "call"}, {"api_name": "sciwing.modules.embedders.char_embedder.CharEmbedder", "line_number": 87, "usage_type": "call"}, {"api_name": "sciwing.modules.embedders.bow_elmo_embedder.BowElmoEmbedder", "line_number": 94, "usage_type": "call"}, {"api_name": "sciwing.modules.embedders.concat_embedders.ConcatEmbedders", "line_number": 100, "usage_type": "call"}, {"api_name": "sciwing.modules.lstm2seqencoder.Lstm2SeqEncoder", "line_number": 102, "usage_type": "call"}, {"api_name": "sciwing.models.rnn_seq_crf_tagger.RnnSeqCrfTagger", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 80, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 80, "usage_type": "name"}, {"api_name": "sciwing.infer.seq_label_inference.seq_label_inference.SequenceLabellingInference", "line_number": 125, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 151, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 137, "usage_type": "name"}, {"api_name": "sciwing.datasets.seq_labeling.seq_labelling_dataset.SeqLabellingDatasetManager", "line_number": 198, "usage_type": "call"}, {"api_name": "sciwing.utils.common.cached_path", "line_number": 199, "usage_type": "call"}, {"api_name": "sciwing.utils.common.cached_path", "line_number": 204, "usage_type": "call"}, {"api_name": "sciwing.utils.common.cached_path", "line_number": 209, "usage_type": "call"}, {"api_name": "json.load", "line_number": 219, "usage_type": "call"}, {"api_name": "sciwing.utils.common.cached_path", "line_number": 224, "usage_type": "call"}]} +{"seq_id": "34725008951", "text": "\"\"\"\nThis module will test the backup and restore commands on neo4j\n\"\"\"\nimport os\nimport time\n\nfrom faker import Faker\n\nfrom controller import BACKUP_DIR, colors\nfrom controller.app import Configuration\nfrom tests import (\n Capture,\n TemporaryRemovePath,\n create_project,\n exec_command,\n execute_outside,\n init_project,\n pull_images,\n random_project_name,\n service_verify,\n start_project,\n start_registry,\n)\n\n\ndef test_all(capfd: Capture, faker: Faker) -> None:\n execute_outside(capfd, \"backup neo4j\")\n execute_outside(capfd, \"restore neo4j\")\n\n backup_folder = BACKUP_DIR.joinpath(\"neo4j\")\n\n create_project(\n capfd=capfd,\n name=random_project_name(faker),\n auth=\"neo4j\",\n frontend=\"no\",\n )\n init_project(capfd)\n start_registry(capfd)\n\n exec_command(\n capfd,\n \"backup neo4j\",\n f\"image, execute {colors.RED}rapydo pull neo4j\",\n )\n exec_command(\n capfd,\n \"restore neo4j\",\n f\"image, execute {colors.RED}rapydo pull neo4j\",\n )\n\n pull_images(capfd)\n start_project(capfd)\n\n service_verify(capfd, \"neo4j\")\n\n # This will initialize neo4j\n exec_command(capfd, \"shell backend 'restapi init'\")\n\n time.sleep(20)\n # Just some delay extra delay. 
restapi init alone not always is enough...\n if Configuration.swarm_mode:\n time.sleep(30)\n\n # Verify the initialization\n cypher = \"shell neo4j 'bin/cypher-shell\"\n exec_command(\n capfd,\n f'{cypher} \"match (r: Role) return r.name, r.description\"\\'',\n '\"normal_user\", \"User\"',\n )\n\n # Backup command\n exec_command(\n capfd,\n \"backup neo4j\",\n \"Neo4j is running and the backup will temporary stop it. \"\n \"If you want to continue add --force flag\",\n )\n exec_command(\n capfd,\n \"backup neo4j --force --restart backend --restart rabbit\",\n \"Starting backup on neo4j...\",\n \"Backup completed: data/backup/neo4j/\",\n \"Restarting services in 20 seconds...\",\n \"Restarting services in 10 seconds...\",\n )\n # This is to verify that --force restarted neo4j\n exec_command(\n capfd,\n \"backup neo4j\",\n \"Neo4j is running and the backup will temporary stop it. \"\n \"If you want to continue add --force flag\",\n )\n\n exec_command(\n capfd,\n \"backup invalid\",\n \"Invalid value for\",\n \"is not one of 'neo4j', 'postgres', 'rabbit', 'redis'\",\n )\n\n exec_command(capfd, \"remove\", \"Stack removed\")\n\n exec_command(\n capfd,\n \"backup neo4j\",\n \"Starting backup on neo4j...\",\n \"Backup completed: data/backup/neo4j/\",\n )\n\n # Test backup retention\n exec_command(\n capfd,\n \"backup neo4j --max 999 --dry-run\",\n \"Dry run mode is enabled\",\n \"Found 2 backup files, maximum not reached\",\n \"Starting backup on neo4j...\",\n \"Backup completed: data/backup/neo4j/\",\n )\n # Verify that due to dry run, no backup is executed\n exec_command(\n capfd,\n \"backup neo4j --max 999 --dry-run\",\n \"Dry run mode is enabled\",\n \"Found 2 backup files, maximum not reached\",\n \"Starting backup on neo4j...\",\n \"Backup completed: data/backup/neo4j/\",\n )\n\n exec_command(\n capfd,\n \"backup neo4j --max 1 --dry-run\",\n \"Dry run mode is enabled\",\n \"deleted because exceeding the max number of backup files (1)\",\n \"Starting backup on neo4j...\",\n \"Backup completed: data/backup/neo4j/\",\n )\n # Verify that due to dry run, no backup is executed\n exec_command(\n capfd,\n \"backup neo4j --max 1 --dry-run\",\n \"Dry run mode is enabled\",\n \"deleted because exceeding the max number of backup files (1)\",\n \"Starting backup on neo4j...\",\n \"Backup completed: data/backup/neo4j/\",\n )\n\n # Create an additional backup to the test deletion (now backups are 3)\n exec_command(\n capfd,\n \"backup neo4j\",\n \"Starting backup on neo4j...\",\n \"Backup completed: data/backup/neo4j/\",\n )\n\n # Save the current number of backup files\n number_of_backups = len(list(backup_folder.glob(\"*\")))\n\n # Verify the deletion\n exec_command(\n capfd,\n \"backup neo4j --max 1\",\n \"deleted because exceeding the max number of backup files (1)\",\n \"Starting backup on neo4j...\",\n \"Backup completed: data/backup/neo4j/\",\n )\n\n # Now the number of backups should be reduced by 1 (i.e. 
+1 -2)\n    assert len(list(backup_folder.glob(\"*\"))) == number_of_backups - 1\n\n    # Verify that --max ignores files without the date pattern\n    backup_folder.joinpath(\"xyz\").touch(exist_ok=True)\n    backup_folder.joinpath(\"xyz.ext\").touch(exist_ok=True)\n    backup_folder.joinpath(\"2020_01\").touch(exist_ok=True)\n    backup_folder.joinpath(\"2020_01_01\").touch(exist_ok=True)\n    backup_folder.joinpath(\"2020_01_01-01\").touch(exist_ok=True)\n    backup_folder.joinpath(\"2020_01_01-01_01\").touch(exist_ok=True)\n    backup_folder.joinpath(\"2020_01_01-01_01_01\").touch(exist_ok=True)\n    backup_folder.joinpath(\"9999_01_01-01_01_01.bak\").touch(exist_ok=True)\n    backup_folder.joinpath(\"2020_99_01-01_01_01.bak\").touch(exist_ok=True)\n    backup_folder.joinpath(\"2020_01_99-01_01_01.bak\").touch(exist_ok=True)\n    backup_folder.joinpath(\"2020_01_01-99_01_01.bak\").touch(exist_ok=True)\n    backup_folder.joinpath(\"2020_01_01-01_99_01.bak\").touch(exist_ok=True)\n    backup_folder.joinpath(\"2020_01_01-01_01_99.bak\").touch(exist_ok=True)\n\n    exec_command(\n        capfd,\n        \"backup neo4j --max 999 --dry-run\",\n        \"Dry run mode is enabled\",\n        # Still finding 2, all files above are ignored because they do not match the pattern\n        \"Found 2 backup files, maximum not reached\",\n        \"Starting backup on neo4j...\",\n        \"Backup completed: data/backup/neo4j/\",\n    )\n\n    exec_command(capfd, \"start\", \"Stack started\")\n\n    # Just some extra delay, neo4j is a slow starter\n    time.sleep(20)\n\n    # Restore command\n    exec_command(\n        capfd, \"restore neo4j\", \"Please specify one of the following backup:\", \".dump\"\n    )\n\n    exec_command(\n        capfd,\n        \"restore neo4j invalid\",\n        \"Invalid backup file, data/backup/neo4j/invalid does not exist\",\n    )\n\n    with TemporaryRemovePath(BACKUP_DIR):\n        exec_command(\n            capfd,\n            \"restore neo4j\",\n            \"No backup found, the following folder \"\n            \"does not exist: data/backup/neo4j\",\n        )\n\n    with TemporaryRemovePath(backup_folder):\n        exec_command(\n            capfd,\n            \"restore neo4j\",\n            f\"No backup found, the following folder does not exist: {backup_folder}\",\n        )\n\n    os.mkdir(\"data/backup/neo4j\")\n\n    exec_command(\n        capfd,\n        \"restore neo4j\",\n        \"No backup found, data/backup/neo4j is empty\",\n    )\n\n    open(\"data/backup/neo4j/test.gz\", \"a\").close()\n\n    exec_command(\n        capfd,\n        \"restore neo4j\",\n        \"No backup found, data/backup/neo4j is empty\",\n    )\n\n    open(\"data/backup/neo4j/test.dump\", \"a\").close()\n\n    exec_command(\n        capfd,\n        \"restore neo4j\",\n        \"Please specify one of the following backup:\",\n        \"test.dump\",\n    )\n\n    os.remove(\"data/backup/neo4j/test.gz\")\n    os.remove(\"data/backup/neo4j/test.dump\")\n\n    # Test restore on neo4j (requires neo4j to be down)\n    files = os.listdir(\"data/backup/neo4j\")\n    files = [f for f in files if f.endswith(\".dump\")]\n    files.sort()\n    neo4j_dump_file = files[-1]\n\n    time.sleep(20)\n\n    # Here we test the restore procedure:\n    # 1) verify some data in the database\n    exec_command(\n        capfd,\n        f'{cypher} \"match (r: Role) return r.name, r.description\"\\'',\n        '\"normal_user\", \"User\"',\n    )\n\n    # 2) Modify the data\n    exec_command(capfd, f'{cypher} \"match (r: Role) SET r.description = r.name\"\\'')\n    exec_command(\n        capfd,\n        f'{cypher} \"match (r: Role) return r.name, r.description\"\\'',\n        '\"normal_user\", \"normal_user\"',\n    )\n    exec_command(capfd, \"remove\")\n\n    # 3) restore the dump\n    exec_command(\n        capfd,\n        f\"restore neo4j {neo4j_dump_file}\",\n        \"Starting restore on neo4j...\",\n        \"Done: \",\n        f\"Restore from data/backup/neo4j/{neo4j_dump_file} 
completed\",\n )\n\n exec_command(capfd, \"start\", \"Stack started\")\n\n exec_command(\n capfd,\n f\"restore neo4j {neo4j_dump_file}\",\n \"Neo4j is running and the restore will temporary stop it.\",\n \"If you want to continue add --force flag\",\n )\n\n exec_command(\n capfd,\n f\"restore neo4j {neo4j_dump_file} --force --restart backend\",\n \"Starting restore on neo4j...\",\n \"Done: \",\n f\"Restore from data/backup/neo4j/{neo4j_dump_file} completed\",\n \"Restarting services in 20 seconds...\",\n \"Restarting services in 10 seconds...\",\n )\n\n # Wait neo4j to completely startup\n service_verify(capfd, \"neo4j\")\n\n # 4) verify data match again point 1 (restore completed)\n exec_command(\n capfd,\n f'{cypher} \"match (r: Role) return r.name, r.description\"\\'',\n '\"normal_user\", \"User\"',\n )\n", "repo_name": "rapydo/do", "sub_path": "tests/test_backup_restore_neo4j.py", "file_name": "test_backup_restore_neo4j.py", "file_ext": "py", "file_size_in_byte": 9326, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "33", "api": [{"api_name": "tests.Capture", "line_number": 26, "usage_type": "name"}, {"api_name": "faker.Faker", "line_number": 26, "usage_type": "name"}, {"api_name": "tests.execute_outside", "line_number": 27, "usage_type": "call"}, {"api_name": "tests.execute_outside", "line_number": 28, "usage_type": "call"}, {"api_name": "controller.BACKUP_DIR.joinpath", "line_number": 30, "usage_type": "call"}, {"api_name": "controller.BACKUP_DIR", "line_number": 30, "usage_type": "name"}, {"api_name": "tests.create_project", "line_number": 32, "usage_type": "call"}, {"api_name": "tests.random_project_name", "line_number": 34, "usage_type": "call"}, {"api_name": "tests.init_project", "line_number": 38, "usage_type": "call"}, {"api_name": "tests.start_registry", "line_number": 39, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 41, "usage_type": "call"}, {"api_name": "controller.colors.RED", "line_number": 44, "usage_type": "attribute"}, {"api_name": "controller.colors", "line_number": 44, "usage_type": "name"}, {"api_name": "tests.exec_command", "line_number": 46, "usage_type": "call"}, {"api_name": "controller.colors.RED", "line_number": 49, "usage_type": "attribute"}, {"api_name": "controller.colors", "line_number": 49, "usage_type": "name"}, {"api_name": "tests.pull_images", "line_number": 52, "usage_type": "call"}, {"api_name": "tests.start_project", "line_number": 53, "usage_type": "call"}, {"api_name": "tests.service_verify", "line_number": 55, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 58, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 60, "usage_type": "call"}, {"api_name": "controller.app.Configuration.swarm_mode", "line_number": 62, "usage_type": "attribute"}, {"api_name": "controller.app.Configuration", "line_number": 62, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 63, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 67, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 74, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 80, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 89, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 96, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 103, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 105, "usage_type": "call"}, 
{"api_name": "tests.exec_command", "line_number": 113, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 122, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 131, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 140, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 150, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 161, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 187, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 197, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 200, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 203, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 207, "usage_type": "call"}, {"api_name": "tests.TemporaryRemovePath", "line_number": 213, "usage_type": "call"}, {"api_name": "controller.BACKUP_DIR", "line_number": 213, "usage_type": "argument"}, {"api_name": "tests.exec_command", "line_number": 214, "usage_type": "call"}, {"api_name": "tests.TemporaryRemovePath", "line_number": 221, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 222, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 228, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 230, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 238, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 246, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 253, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 254, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 257, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 262, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 266, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 273, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 274, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 279, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 282, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 290, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 292, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 299, "usage_type": "call"}, {"api_name": "tests.service_verify", "line_number": 310, "usage_type": "call"}, {"api_name": "tests.exec_command", "line_number": 313, "usage_type": "call"}]} +{"seq_id": "7226763878", "text": "import pytest\nimport numpy\n\nfrom mpi4py import MPI\ncomm = MPI.COMM_WORLD\nnumpy.random.seed(7)\nskip = comm.size == 1\n\n@pytest.mark.unit\n@pytest.mark.skipif(skip, reason=\"Test should be run on multiple cores.\")\ndef test_pair_branch():\n if comm.rank == 0:\n weights = [0.001, 1.0148, 4.348]\n else:\n weights = [1.2, 2.348, 4.4]\n walker_info = []\n for i, w in enumerate(weights):\n walker_info.append([w,1,comm.rank,comm.rank])\n comm.barrier()\n glob_inf = comm.allgather(walker_info)\n # print(comm.rank, glob_inf)\n # comm.barrier()\n min_weight = 0.1\n max_weight = 4.0\n # Unpack lists\n glob_inf = numpy.array([item for sub in glob_inf for item in sub])\n # glob_inf.sort(key=lambda x: x[0])\n sort = numpy.argsort(glob_inf[:,0], kind='mergesort')\n isort = numpy.argsort(sort, kind='mergesort')\n glob_inf = glob_inf[sort]\n s = 0\n e = len(glob_inf) - 1\n while s < e:\n if 
glob_inf[s][0] < min_weight or glob_inf[e][0] > max_weight:\n            # sum of paired walker weights\n            wab = glob_inf[s][0] + glob_inf[e][0]\n            r = numpy.random.rand()\n            if r < glob_inf[e][0] / wab:\n                # clone large weight walker\n                glob_inf[e][0] = 0.5 * wab\n                glob_inf[e][1] = 2\n                # Processor we will send duplicated walker to\n                glob_inf[e][3] = glob_inf[s][2]\n                # Kill small weight walker\n                glob_inf[s][0] = 0.0\n                glob_inf[s][1] = 0\n                glob_inf[s][3] = glob_inf[e][2]\n            else:\n                # clone small weight walker\n                glob_inf[s][0] = 0.5 * wab\n                glob_inf[s][1] = 2\n                # Processor we will send duplicated walker to\n                glob_inf[s][3] = glob_inf[e][2]\n                # Kill large weight walker\n                glob_inf[e][0] = 0.0\n                glob_inf[e][1] = 0\n                glob_inf[e][3] = glob_inf[s][2]\n            s += 1\n            e -= 1\n        else:\n            break\n    glob_inf = glob_inf[isort]\n    reqs = []\n    nw = len(weights)\n    for walker in glob_inf[comm.rank*nw:(comm.rank+1)*nw]:\n        if walker[1] > 1:\n            tag = comm.rank*len(walker_info) + walker[3]\n            reqs.append(comm.isend(comm.rank*numpy.ones(2),\n                                   dest=int(round(walker[3])), tag=tag))\n    buff = []\n    for walker in glob_inf[comm.rank*nw:(comm.rank+1)*nw]:\n        if walker[1] == 0:\n            tag = walker[3]*len(walker_info) + comm.rank\n            buff.append(comm.recv(source=int(round(walker[3])), tag=tag))\n    for r in reqs:\n        r.wait()\n    if comm.rank == 0:\n        assert len(buff) == 2\n        assert sum(buff[0]) == 2\n        assert sum(buff[1]) == 0\n", "repo_name": "pauxy-qmc/pauxy", "sub_path": "pauxy/walkers/tests/test_handler.py", "file_name": "test_handler.py", "file_ext": "py", "file_size_in_byte": 2769, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 25, "dataset": "github-code", "pt": "41", "api": [{"api_name": "mpi4py.MPI.COMM_WORLD", "line_number": 5, "usage_type": "attribute"}, {"api_name": "mpi4py.MPI", "line_number": 5, "usage_type": "name"}, {"api_name": "numpy.random.seed", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 6, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 37, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 68, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pytest.mark.skipif", "line_number": 10, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 10, "usage_type": "attribute"}]} +{"seq_id": "3782894062", "text": "import sys\nfrom collections import Counter \ninput = sys.stdin.readline\nA=[]\nfor i in sys.stdin.read():\n    str_low = i.lower()\n    A.append(str_low)\n\nstr_c_list = []\nalphabet = 'abcdefghijklmnopqrstuvwxyz'\nfor i in A:\n    str_c_list.append(Counter(i))\nfor _ in alphabet:\n    count= 0\n    for i in str_c_list:\n        count += i[_]\n    print(_+\" :\",end=\" \") \n    print(count)\n", "repo_name": "Shuhei-pp/ProgramingCompe", "sub_path": "ITP1/8-C.py", "file_name": "8-C.py", "file_ext": "py", "file_size_in_byte": 357, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "sys.stdin", "line_number": 3, "usage_type": "attribute"}, {"api_name": "sys.stdin.read", "line_number": 5, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 5, "usage_type": "attribute"}, {"api_name": "collections.Counter", "line_number": 12, 
"usage_type": "call"}]} +{"seq_id": "19388167975", "text": "import torch\nfrom transformers import LogitsWarper\n\n\nclass AdvancedRepetitionPenaltyLogitsProcessor(LogitsWarper):\n def __init__(self, *args, **kwargs):\n pass\n\n def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:\n self.penalty_range = int(self.penalty_range)\n clipped_penalty_range = min(input_ids.shape[-1], self.penalty_range)\n\n if self.penalty != 1.0:\n if self.penalty_range > 0:\n if clipped_penalty_range < input_ids.shape[1]:\n input_ids = input_ids[..., -clipped_penalty_range:]\n\n if self.penalty_slope != 0:\n _penalty = (torch.arange(self.penalty_range, dtype=scores.dtype, device=scores.device)/(self.penalty_range - 1)) * 2. - 1\n _penalty = (self.penalty_slope * _penalty) / (1 + torch.abs(_penalty) * (self.penalty_slope - 1))\n _penalty = 1 + ((_penalty + 1) / 2).unsqueeze(0) * (self.penalty - 1)\n self.penalty = _penalty[..., -clipped_penalty_range:]\n\n score = torch.gather(scores, 1, input_ids)\n score = torch.where(score <= 0, score * self.penalty, score / self.penalty)\n scores.scatter_(1, input_ids, score)\n\n return scores\n\n\nclass TailFreeLogitsWarper(LogitsWarper):\n\n def __init__(self, tfs: float, filter_value: float = -float(\"Inf\"), min_tokens_to_keep: int = 1):\n tfs = float(tfs)\n if tfs < 0 or tfs > 1.0:\n raise ValueError(f\"`tfs` has to be a float >= 0 and <= 1, but is {tfs}\")\n self.tfs = tfs\n self.filter_value = filter_value\n self.min_tokens_to_keep = min_tokens_to_keep\n\n def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:\n if self.filter_value >= 1.0:\n return scores\n sorted_logits, sorted_indices = torch.sort(scores, descending=True)\n probs = sorted_logits.softmax(dim=-1)\n\n # Compute second derivative normalized CDF\n d2 = probs.diff().diff().abs()\n normalized_d2 = d2 / d2.sum(dim=-1, keepdim=True)\n normalized_d2_cdf = normalized_d2.cumsum(dim=-1)\n\n # Remove tokens with CDF value above the threshold (token with 0 are kept)\n sorted_indices_to_remove = normalized_d2_cdf > self.tfs\n\n # Centre the distribution around the cutoff as in the original implementation of the algorithm\n sorted_indices_to_remove = torch.cat(\n (\n torch.zeros(scores.shape[0], 1, dtype=torch.bool, device=scores.device),\n sorted_indices_to_remove,\n torch.ones(scores.shape[0], 1, dtype=torch.bool, device=scores.device),\n ),\n dim=-1,\n )\n\n if self.min_tokens_to_keep > 1:\n # Keep at least min_tokens_to_keep\n sorted_indices_to_remove[..., : self.min_tokens_to_keep] = 0\n\n indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)\n scores = scores.masked_fill(indices_to_remove, self.filter_value)\n return scores\n\n\nclass TypicalLogitsWarper(LogitsWarper):\n '''\n Typical sampling, described in https://arxiv.org/pdf/2202.00666.pdf\n '''\n\n def __init__(self, typical: float, filter_value: float = -float(\"Inf\"), min_tokens_to_keep: int = 1):\n typical = float(typical)\n if typical < 0 or typical > 1.0:\n raise ValueError(f\"`typical` has to be a float >= 0 and <= 1, but is {typical}\")\n self.typical = typical\n self.filter_value = filter_value\n self.min_tokens_to_keep = min_tokens_to_keep\n\n def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:\n if self.filter_value >= 1.0:\n return scores\n\n # Compute softmax probabilities and the natural logarithms of them\n probs = scores.softmax(dim=-1)\n log_probs = probs.log()\n\n 
# Compute the negative of entropy, which is the sum of p*ln(p) for all p\n # in the set of softmax probabilities of the logits\n neg_entropy = (probs * log_probs).nansum(dim=-1, keepdim=True)\n\n # Determine absolute difference between the negative entropy and the\n # log probabilities\n entropy_deviation = (neg_entropy - log_probs).abs()\n\n # Keep certain tokens such that the sum of the entropy_deviation of the\n # kept tokens is the smallest possible value such that the sum of the\n # softmax probabilities of the kept tokens is at least the threshold\n # value (by sorting the tokens in ascending order of entropy_deviation\n # and then keeping the smallest possible number of tokens from the\n # beginning such that sum of softmax probabilities is at or above the\n # threshold)\n _, sorted_indices = torch.sort(entropy_deviation)\n sorted_logits = probs.gather(-1, sorted_indices)\n sorted_indices_to_remove = sorted_logits.cumsum(dim=-1) >= self.typical\n sorted_indices_to_remove = sorted_indices_to_remove.roll(1, dims=-1)\n\n min_tokens_to_keep = max(self.min_tokens_to_keep, 1)\n # Keep at least min_tokens_to_keep\n sorted_indices_to_remove[..., : min_tokens_to_keep] = 0\n\n indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)\n scores = scores.masked_fill(indices_to_remove, self.filter_value)\n return scores\n\n\nclass TopALogitsWarper(LogitsWarper):\n def __init__(self, top_a: float, filter_value: float = -float(\"Inf\"), min_tokens_to_keep: int = 1):\n top_a = float(top_a)\n if top_a < 0 or top_a > 1.0:\n raise ValueError(f\"`top_a` has to be a float >= 0 and <= 1, but is {top_a}\")\n self.top_a = top_a\n self.filter_value = filter_value\n self.min_tokens_to_keep = min_tokens_to_keep\n\n def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:\n if self.filter_value >= 1.0:\n return scores\n\n sorted_logits, sorted_indices = torch.sort(scores, descending=True)\n probs = sorted_logits.softmax(dim=-1)\n\n # Remove tokens with probability less than top_a*(max(probs))^2 (token with 0 are kept)\n probs_max = probs[..., 0, None]\n sorted_indices_to_remove = probs < probs_max * probs_max * self.top_a\n\n if self.min_tokens_to_keep > 1:\n # Keep at least min_tokens_to_keep\n sorted_indices_to_remove[..., : self.min_tokens_to_keep] = 0\n\n indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)\n scores = scores.masked_fill(indices_to_remove, self.filter_value)\n return scores\n", "repo_name": "KoboldAI/KoboldAI-Client", "sub_path": "warpers.py", "file_name": "warpers.py", "file_ext": "py", "file_size_in_byte": 6774, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3002, "dataset": "github-code", "pt": "41", "api": [{"api_name": "transformers.LogitsWarper", "line_number": 5, "usage_type": "name"}, {"api_name": "torch.LongTensor", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch.FloatTensor", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch.arange", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.abs", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.gather", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 25, "usage_type": "call"}, {"api_name": "transformers.LogitsWarper", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.LongTensor", "line_number": 41, "usage_type": "attribute"}, {"api_name": "torch.FloatTensor", "line_number": 41, 
"usage_type": "attribute"}, {"api_name": "torch.sort", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.bool", "line_number": 58, "usage_type": "attribute"}, {"api_name": "torch.ones", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.bool", "line_number": 60, "usage_type": "attribute"}, {"api_name": "transformers.LogitsWarper", "line_number": 74, "usage_type": "name"}, {"api_name": "torch.LongTensor", "line_number": 87, "usage_type": "attribute"}, {"api_name": "torch.FloatTensor", "line_number": 87, "usage_type": "attribute"}, {"api_name": "torch.sort", "line_number": 110, "usage_type": "call"}, {"api_name": "transformers.LogitsWarper", "line_number": 124, "usage_type": "name"}, {"api_name": "torch.LongTensor", "line_number": 133, "usage_type": "attribute"}, {"api_name": "torch.FloatTensor", "line_number": 133, "usage_type": "attribute"}, {"api_name": "torch.sort", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "37209398804", "text": "# Standard Library\nimport subprocess\nfrom enum import Enum\nfrom typing import Optional\n\n\nclass PingConnect(Enum):\n CONNECTED = 1\n NOT_CONNECTED = 2\n\n\nclass Pinger:\n \"\"\"\n Calls a subprocess to run the ping command and then parses the results\n\n Unforutnatly this can't in pure Python without admin privilages\n As a normal process requires admin for an ICMP request (ping) \n \"\"\"\n def __init__(self, ip_address: str):\n if ':' in ip_address:\n self.ip_address, self.port = ip_address.split(':')\n else:\n self.ip_address = ip_address\n self.port = None\n\n def get_ping_time(self) -> tuple[PingConnect, Optional[int]]:\n response = subprocess.run(\n ['ping', '-n', '1', '-w', '1000', self.ip_address],\n stdin=subprocess.PIPE, # Required for PyInstaller --noconsole\n capture_output=True,\n encoding='ascii',\n creationflags=subprocess.CREATE_NO_WINDOW # Required for noflickering in exe\n )\n if response.returncode != 0:\n return PingConnect.NOT_CONNECTED, None\n\n for line in response.stdout.splitlines():\n if 'Reply from' in line:\n time_equals = line.strip().split()[4]\n time_ms = time_equals.split('=')[1]\n time = int(time_ms[:-2])\n break\n else:\n return PingConnect.NOT_CONNECTED, None\n\n return PingConnect.CONNECTED, time\n", "repo_name": "notatallshaw/fall_guys_ping_estimate", "sub_path": "fgpe/pinger.py", "file_name": "pinger.py", "file_ext": "py", "file_size_in_byte": 1477, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 36, "dataset": "github-code", "pt": "33", "api": [{"api_name": "enum.Enum", "line_number": 7, "usage_type": "name"}, {"api_name": "subprocess.run", "line_number": 27, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 29, "usage_type": "attribute"}, {"api_name": "subprocess.CREATE_NO_WINDOW", "line_number": 32, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "12637887007", "text": "import sys\r\nimport cv2\r\nimport mediapipe as mp\r\nimport math\r\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QPushButton, QVBoxLayout, QWidget, QFileDialog\r\nfrom PyQt5.QtCore import Qt, QTimer, QPoint\r\nfrom PyQt5.QtGui import QImage, QPixmap, QPainter, QPen, QColor, QFont\r\nfrom PyQt5.QtWidgets import QMessageBox\r\n\r\nimport wx\r\n\r\nclass Frame(wx.Frame):\r\n def __init__(self, title):\r\n wx.Frame.__init__(self, None, title=title, 
size=(600,400))\r\n\r\n        self.panel = wx.Panel(self)\r\n        box = wx.BoxSizer(wx.VERTICAL)\r\n        m_text = wx.StaticText(self.panel, -1, 'Fall Detected! Please check the app!')\r\n        m_text.SetSize(m_text.GetBestSize())\r\n\r\n        box.Add(m_text, 0, wx.ALL, 10)\r\n        self.panel.SetSizer(box)\r\n        self.panel.Layout()\r\n\r\n        self.timer = wx.Timer(self)\r\n        self.Bind(wx.EVT_TIMER, self.onClose, self.timer)\r\n        self.timer.Start(3000)\r\n\r\n    def onClose(self, event):\r\n        self.Close()\r\n\r\n\r\n\r\nclass PoseDetectionApp(QMainWindow):\r\n    def __init__(self):\r\n        super().__init__()\r\n\r\n        self.initUI()\r\n        self.initMediapipe()\r\n        self.landmarks_sequence = []\r\n        self.alert_message = \"\"\r\n        self.alert_triggered = False\r\n        self.drawing = False\r\n        self.rect_start = None\r\n        self.rect_end = None\r\n        self.rectangles = []\r\n\r\n        ret, frame = self.cap.read()\r\n        frame = cv2.resize(frame, (640, 360))\r\n        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\r\n        h, w, c = frame.shape\r\n        qImg = QImage(frame.data, w, h, w * c, QImage.Format_RGB888)\r\n        pixmap = QPixmap.fromImage(qImg)\r\n        self.label.setPixmap(pixmap)\r\n\r\n    def initUI(self):\r\n        self.setWindowTitle(\"Pose Detection App\")\r\n        self.setGeometry(100, 100, 800, 600)\r\n\r\n        self.central_widget = QWidget(self)\r\n        self.setCentralWidget(self.central_widget)\r\n\r\n        self.layout = QVBoxLayout()\r\n\r\n        self.label = QLabel(self)\r\n        self.layout.addWidget(self.label)\r\n\r\n        self.start_button = QPushButton(\"Start\", self)\r\n        self.start_button.clicked.connect(self.start_capture)\r\n        self.layout.addWidget(self.start_button)\r\n\r\n        self.draw_area_button = QPushButton(\"Draw Area\", self)\r\n        self.draw_area_button.clicked.connect(self.start_drawing_area) # Connect the button to drawing mode\r\n        self.layout.addWidget(self.draw_area_button)\r\n\r\n        self.delete_area_button = QPushButton(\"Delete Area\", self)\r\n        self.delete_area_button.clicked.connect(self.delete_areas)\r\n        self.layout.addWidget(self.delete_area_button)\r\n\r\n        self.browse_video_button = QPushButton(\"Browse Video\", self)\r\n        self.browse_video_button.clicked.connect(self.browse_video)\r\n        self.layout.addWidget(self.browse_video_button)\r\n\r\n        self.central_widget.setLayout(self.layout)\r\n\r\n        self.cap = cv2.VideoCapture(0)\r\n        self.timer = QTimer(self)\r\n        self.timer.timeout.connect(self.update_frame)\r\n        self.is_capturing = False\r\n        self.frame_counter = 0\r\n\r\n    def initMediapipe(self):\r\n        self.mp_drawing = mp.solutions.drawing_utils\r\n        self.mp_pose = mp.solutions.pose\r\n        self.pose = self.mp_pose.Pose(min_detection_confidence=0.6, min_tracking_confidence=0.6)\r\n\r\n\r\n    def browse_video(self):\r\n        options = QFileDialog.Options()\r\n        file_path, _ = QFileDialog.getOpenFileName(self, \"Open Video File\", \"\", \"Video Files (*.mp4 *.avi *.mkv);;All Files (*)\", options=options)\r\n        if file_path:\r\n            self.video_path = file_path\r\n            self.cap = cv2.VideoCapture(self.video_path)\r\n\r\n    def start_capture(self):\r\n        if not self.is_capturing:\r\n            self.is_capturing = True\r\n            self.start_button.setText(\"Stop\")\r\n            self.timer.start(20)\r\n        else:\r\n            self.is_capturing = False\r\n            self.start_button.setText(\"Start\")\r\n            self.timer.stop()\r\n\r\n    def delete_areas(self):\r\n        self.rectangles = [] # Clear the stored rectangles\r\n\r\n    def start_drawing_area(self):\r\n        self.draw_area_button.setText(\"Drawing Area\")\r\n        self.setCursor(Qt.CrossCursor)\r\n        self.drawing = True\r\n\r\n    def show_popup(self):\r\n        msg = QMessageBox()\r\n        msg.setWindowTitle(\"Companion Catch\")\r\n        msg.setText(\"ALERT! 
FALL DETECTED!\")\r\n\r\n x = msg.exec_()\r\n\r\n def mousePressEvent(self, event):\r\n if self.drawing:\r\n if not self.rect_start:\r\n self.rect_start = event.pos()\r\n else:\r\n self.rect_end = event.pos()\r\n self.drawing = False\r\n self.setCursor(Qt.ArrowCursor)\r\n self.rectangles.append((self.rect_start, self.rect_end))\r\n self.update_frame()\r\n self.rect_start = None\r\n self.rect_end = None\r\n\r\n # def mouseReleaseEvent(self, event):\r\n # if self.drawing:\r\n # self.rect_end = event.pos()\r\n # self.drawing = False\r\n # self.setCursor(Qt.ArrowCursor)\r\n # self.rectangles.append((self.rect_start, self.rect_end))\r\n # self.update_frame()\r\n\r\n\r\n def update_frame(self):\r\n ret, frame = self.cap.read()\r\n if not ret:\r\n return\r\n frame = cv2.resize(frame, (640, 360))\r\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\r\n results = self.pose.process(frame)\r\n if results.pose_landmarks:\r\n self.mp_drawing.draw_landmarks(frame, results.pose_landmarks, self.mp_pose.POSE_CONNECTIONS)\r\n self.frame_counter += 1\r\n\r\n # Collect pose_landmarks for every 20 frames\r\n if self.frame_counter == 20:\r\n self.landmarks_sequence.append(results.pose_landmarks)\r\n\r\n \r\n\r\n\r\n\r\n # Chelandmarks_insidekeypoint (index 0) is in the bottom half of the image\r\n last_landmarks = self.landmarks_sequence[-1]\r\n nose_landmark = last_landmarks.landmark[0] # Index 0 is the Nose landmark\r\n left_knee = last_landmarks.landmark[25]\r\n right_knee = last_landmarks.landmark[26]\r\n left_shoulder = last_landmarks.landmark[11]\r\n right_shoulder = last_landmarks.landmark[12]\r\n left_hip = last_landmarks.landmark[23]\r\n right_hip = last_landmarks.landmark[24]\r\n left_ankle = last_landmarks.landmark[27]\r\n right_ankle = last_landmarks.landmark[28]\r\n left_shoulder = last_landmarks.landmark[11]\r\n right_shoulder = last_landmarks.landmark[12]\r\n if (self.rectangles != None):\r\n print(f\"IGNORE AREA: {self.rect_start} {self.rect_end}\")\r\n # Check if the \"FALLING\" condition is met\r\n if (nose_landmark.y > right_knee.y) or (nose_landmark.y > left_knee.y):\r\n self.alert_triggered = True\r\n if (nose_landmark.x < left_shoulder.x) and (nose_landmark.x < right_shoulder.x) and (nose_landmark.x < right_knee.x) and (nose_landmark.x > left_knee.x):\r\n self.alert_triggered = True\r\n # Clear the alert if the condition is not met\r\n\r\n # Assuming left_hip, left_knee, left_ankle, right_hip, right_knee, and right_ankle are represented as landmarks\r\n\r\n\r\n\r\n\r\n\r\n if not self.alert_triggered:\r\n self.alert_message = \"\"\r\n else:\r\n self.alert_message = \"FALLING\"\r\n\r\n if self.rectangles:\r\n for rect_start, rect_end in self.rectangles:\r\n landmarks_inside = 0\r\n\r\n for landmark in results.pose_landmarks.landmark:\r\n landmark_x = int(landmark.x * frame.shape[1])\r\n landmark_y = int(landmark.y * frame.shape[0])\r\n\r\n if rect_start.x() < landmark_x < rect_end.x() and rect_start.y() < landmark_y < rect_end.y():\r\n landmarks_inside += 1\r\n\r\n print(f\"Landmarks inside the rectangle: {landmarks_inside}\")\r\n if landmarks_inside > 15:\r\n self.alert_message = \"IGNORE\"\r\n\r\n # Create QPoint objects from the landmarks\r\n left_hip_point = QPoint(int(left_hip.x * frame.shape[1]), int(left_hip.y * frame.shape[0]))\r\n left_knee_point = QPoint(int(left_knee.x * frame.shape[1]), int(left_knee.y * frame.shape[0]))\r\n left_ankle_point = QPoint(int(left_ankle.x * frame.shape[1]), int(left_ankle.y * frame.shape[0]))\r\n right_hip_point = QPoint(int(right_hip.x * 
frame.shape[1]), int(right_hip.y * frame.shape[0]))\r\n right_knee_point = QPoint(int(right_knee.x * frame.shape[1]), int(right_knee.y * frame.shape[0]))\r\n right_ankle_point = QPoint(int(right_ankle.x * frame.shape[1]), int(right_ankle.y * frame.shape[0]))\r\n\r\n # Calculate vectors for the left side\r\n left_vector1 = left_hip_point - left_knee_point\r\n left_vector2 = left_ankle_point - left_knee_point\r\n\r\n # Calculate vectors for the right side\r\n right_vector1 = right_hip_point - right_knee_point\r\n right_vector2 = right_ankle_point - right_knee_point\r\n\r\n # Calculate the angles in radians for both sides\r\n left_cosine_theta = (left_vector1.x() * left_vector2.x() + left_vector1.y() * left_vector2.y()) / \\\r\n (math.sqrt(left_vector1.x() ** 2 + left_vector1.y() ** 2) * \r\n math.sqrt(left_vector2.x() ** 2 + left_vector2.y() ** 2))\r\n\r\n right_cosine_theta = (right_vector1.x() * right_vector2.x() + right_vector1.y() * right_vector2.y()) / \\\r\n (math.sqrt(right_vector1.x() ** 2 + right_vector1.y() ** 2) * \r\n math.sqrt(right_vector2.x() ** 2 + right_vector2.y() ** 2))\r\n\r\n left_angle_rad = math.acos(left_cosine_theta)\r\n right_angle_rad = math.acos(right_cosine_theta)\r\n\r\n # Convert the angles to degrees\r\n left_angle_deg = math.degrees(left_angle_rad)\r\n right_angle_deg = math.degrees(right_angle_rad)\r\n if (left_angle_deg + right_angle_deg) < 170:\r\n print(\"SITTING\")\r\n self.alert_message = \"SITTING\"\r\n\r\n\r\n self.frame_counter = 0\r\n \r\n h, w, c = frame.shape\r\n qImg = QImage(frame.data, w, h, w * c, QImage.Format_RGB888)\r\n\r\n # Create a QPixmap for drawing rectangles\r\n pixmap = QPixmap.fromImage(qImg)\r\n painter = QPainter(pixmap)\r\n\r\n # Draw stored rectangles\r\n for start, end in self.rectangles:\r\n pen = QPen(QColor(255, 0, 0))\r\n pen.setWidth(2)\r\n painter.setPen(pen)\r\n painter.drawRect(start.x(), start.y(), end.x() - start.x(), end.y() - start.y())\r\n\r\n # Release the QPainter\r\n painter.end()\r\n\r\n # Set the QPixmap to the label\r\n self.label.setPixmap(pixmap)\r\n\r\n if self.alert_triggered:\r\n self.draw_alert_message(pixmap)\r\n if len(self.landmarks_sequence) > 0:\r\n # Get the bounding box coordinates\r\n landmarks = self.landmarks_sequence[-1]\r\n min_x, max_x = float('inf'), 0\r\n min_y, max_y = float('inf'), 0\r\n\r\n for landmark in landmarks.landmark:\r\n x, y = landmark.x, landmark.y\r\n min_x = min(min_x, x)\r\n max_x = max(max_x, x)\r\n min_y = min(min_y, y)\r\n max_y = max(max_y, y)\r\n\r\n min_x = int(min_x * frame.shape[1])\r\n max_x = int(max_x * frame.shape[1])\r\n min_y = int(min_y * frame.shape[0])\r\n max_y = int(max_y * frame.shape[0])\r\n cv2.rectangle(frame, (min_x, min_y), (max_x, max_y), (255, 0, 0), 2)\r\n\r\n # Draw a red bounding box around the detected skeleton\r\n self.label.setPixmap(pixmap)\r\n\r\n \r\n def draw_alert_message(self, pixmap):\r\n painter = QPainter(pixmap)\r\n pen = QPen()\r\n pen.setColor(QColor(255, 0, 0))\r\n pen.setWidth(2)\r\n painter.setPen(pen)\r\n\r\n font = QFont()\r\n font.setPointSize(50)\r\n painter.setFont(font)\r\n rect = self.label.rect()\r\n rect.setTop(100) # Adjust the position of the alert message\r\n painter.drawText(rect, Qt.AlignCenter, self.alert_message)\r\n if self.alert_message == \"FALLING\":#-----------------------------------\r\n app = wx.App(redirect=True)\r\n top = Frame('Companion Catch')\r\n top.Show()\r\n app.MainLoop()\r\n\r\n def closeEvent(self, event):\r\n self.cap.release()\r\n event.accept()\r\n\r\ndef main():\r\n app = 
QApplication(sys.argv)\r\n window = PoseDetectionApp()\r\n window.show()\r\n sys.exit(app.exec_())\r\n\r\nif __name__ == '__main__':\r\n main()\r\n", "repo_name": "calvinbun/Companion-Catch", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 13387, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "wx.Frame", "line_number": 12, "usage_type": "attribute"}, {"api_name": "wx.Frame.__init__", "line_number": 14, "usage_type": "call"}, {"api_name": "wx.Frame", "line_number": 14, "usage_type": "attribute"}, {"api_name": "wx.Panel", "line_number": 16, "usage_type": "call"}, {"api_name": "wx.BoxSizer", "line_number": 17, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 17, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 18, "usage_type": "call"}, {"api_name": "wx.ALL", "line_number": 21, "usage_type": "attribute"}, {"api_name": "wx.Timer", "line_number": 25, "usage_type": "call"}, {"api_name": "wx.EVT_TIMER", "line_number": 26, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 34, "usage_type": "name"}, {"api_name": "cv2.resize", "line_number": 49, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 50, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui.QImage", "line_number": 52, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QImage.Format_RGB888", "line_number": 52, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui.QPixmap.fromImage", "line_number": 53, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 53, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 60, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 63, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 65, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 68, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 72, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 76, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 80, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 86, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 87, "usage_type": "call"}, {"api_name": "mediapipe.solutions", "line_number": 93, "usage_type": "attribute"}, {"api_name": "mediapipe.solutions", "line_number": 94, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.Options", "line_number": 99, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 99, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.getOpenFileName", "line_number": 100, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 100, "usage_type": "name"}, {"api_name": "cv2.VideoCapture", "line_number": 103, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.CrossCursor", "line_number": 119, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 119, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 123, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.ArrowCursor", "line_number": 136, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 136, "usage_type": "name"}, 
{"api_name": "cv2.resize", "line_number": 155, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 156, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 156, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.QPoint", "line_number": 219, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QPoint", "line_number": 220, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QPoint", "line_number": 221, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QPoint", "line_number": 222, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QPoint", "line_number": 223, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QPoint", "line_number": 224, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 236, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 237, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 240, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 241, "usage_type": "call"}, {"api_name": "math.acos", "line_number": 243, "usage_type": "call"}, {"api_name": "math.acos", "line_number": 244, "usage_type": "call"}, {"api_name": "math.degrees", "line_number": 247, "usage_type": "call"}, {"api_name": "math.degrees", "line_number": 248, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QImage", "line_number": 257, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QImage.Format_RGB888", "line_number": 257, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui.QPixmap.fromImage", "line_number": 260, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 260, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QPainter", "line_number": 261, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPen", "line_number": 265, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QColor", "line_number": 265, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 295, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPainter", "line_number": 302, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QPen", "line_number": 303, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QColor", "line_number": 304, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 308, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignCenter", "line_number": 313, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 313, "usage_type": "name"}, {"api_name": "wx.App", "line_number": 315, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 325, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 325, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 328, "usage_type": "call"}]} +{"seq_id": "37000307877", "text": "import numpy as np\nimport tqdm\nimport matplotlib.pyplot as plt\nimport scipy.special as ssp\nimport mpmath as mp\nimport scipy.integrate\nimport h5py\nimport sys\nfrom sympy.physics.wigner import gaunt, wigner_3j\n\nkeV = 1e3\nGeV = 1e9\nmElectron = 511*keV\naEM = 1.0/137.035999139\na0 = 1/aEM/mElectron #Bohr radius\n\n'''\nJuly 2020\nxuexiao@mail.itp.ac.cn\nxxueitp@gmail.com\n\nFor simplicity, this program can only process one pair of (n,l) at a time.\n'''\n\n\nrv2real = lambda x: np.vectorize(np.real)(x)\nrv2imag = lambda x: np.vectorize(np.imag)(x)\nrv2float = lambda x : np.vectorize(float)(x)\nrv2log = lambda x : np.vectorize(mp.log)(x)\nrv2exp = lambda x : np.vectorize(mp.exp)(x)\n\ndef Save(file_name,data_name,data,silence=False):\n\n f = h5py.File(file_name,'a')\n\n try:\n f.create_dataset(data_name , data = data)\n 
if silence == False:\n print('>>\\''+file_name+'\\':', '\\''+data_name+'\\'' , 'saved')\n except ValueError:\n del f[data_name]\n f.create_dataset(data_name , data = data)\n #data0 = f[data_name]\n #data0[...] = data #renew the data\n if silence == False:\n print('>>\\''+file_name+'\\':', '\\''+data_name+'\\'' , 'rewritten.')\n\n f.close()\n \n \n\n'''\n\nmain\n\n'''\n\nclass pipeline:\n\n def __init__(self,C,Z,n_list,E_B,Z_eff,fac,file_name):\n # Get the Whole picture\n self.global_C = C\n self.global_Z = Z\n self.global_n_list = n_list\n self.global_E_B = E_B\n self.global_Z_eff = Z_eff\n self.global_fac = fac # impose a general normalization factor, K -> fac*K\n self.create_global_quantum_number_list()\n self.file_name = file_name\n \n \n\n self.QN_nllL = None\n self.r_grid = None\n self.kPrime_grid = None\n self.q_grid = None\n self.R_nlr_Table = None\n self.R_final_nllkr_Table = None\n self.Spherical_Jn_Lqr_Table = None\n self.I1 = None\n self.I2 = None\n self.I3 = None\n self.Atomic_Response_W1 = None\n self.Atomic_Response_K = None\n\n\n \n\n def save_which(self,start=0,stop=12):\n\n name_list = ['QN_nllL','r_grid','kPrime_grid','q_grid',\n 'R_nlr_Table','R_final_nllkr_Table','Spherical_Jn_Lqr_Table',\n 'I1','I2','I3','Atomic_Response_W1','Atomic_Response_K']\n\n save_list = [self.QN_nllL,self.r_grid,self.kPrime_grid,self.q_grid,\n self.R_nlr_Table,self.R_final_nllkr_Table,self.Spherical_Jn_Lqr_Table,\n self.I1,self.I2,self.I3,self.Atomic_Response_W1,self.Atomic_Response_K]\n\n for i in range(start,stop):\n if type(save_list[i])==type(None):\n print(name_list[i], 'is not calculated')\n else:\n Save( self.file_name , name_list[i] , save_list[i] )\n\n\n\n\n # global_QN = global quantum numbers for the element.\n def create_global_quantum_number_list(self,Lmax=100):\n\n global_QN = np.empty((0,4))\n\n for n in range(1,len(self.global_C)+1): \n for l in range(len(self.global_C[n-1])): \n for lPrime in range(Lmax+1): \n for L in range(abs(l-lPrime), l+lPrime+1): \n vec = np.array([n,l,lPrime,L])\n global_QN = np.vstack([global_QN,vec])\n \n self.global_QN = global_QN.astype(int)\n\n\n\n\n def set_r_grid(self,rmin,rmax,Nr):\n\n x_grid,weight = ssp.p_roots(Nr)\n r_grid = 0.5*(rmax-rmin)*x_grid+0.5*(rmax+rmin)\n\n self.Nr = int(Nr)\n self.r_grid = r_grid\n self.r_weight = weight\n self.rmin = r_grid[0]\n self.rmax = r_grid[-1]\n \n #print('rmin','%.3e'%self.r_grid[0],'rmax','%.3e'%self.r_grid[-1],'1/eV')\n \n\n\n\n def set_kPrime_grid(self,kPrime_grid):\n\n\n self.Nk = len(kPrime_grid)\n self.kPrime_grid = kPrime_grid\n \n #print('kmin','%.3e'%self.kPrime_grid[0],'kmax','%.3e'%self.kPrime_grid[-1],'eV')\n \n\n\n\n def set_q_grid(self,q_grid):\n\n self.Nq = len(q_grid)\n self.q_grid = q_grid\n\n #print('qmin','%.3e'%self.q_grid[0],'qmax','%.3e'%self.q_grid[-1],'eV')\n\n\n\n\n\n def set_demanded_n_l(self,n,l):\n \n Save(self.file_name,'n',n)\n Save(self.file_name,'l',l)\n Save(self.file_name,'E_B',self.global_E_B[n-1][l])\n \n\n #f1 = np.tile(self.global_QN[:,:],(np.size(n),1,1))\n #f2 = np.tile(n,(len(self.global_QN),1)).T\n #f3 = np.tile(l,(len(self.global_QN),1)).T\n\n #find = np.where( (f1[:,:,0]==f2)*(f1[:,:,1]==f3) )\n self.QN_nllL = self.global_QN[np.where( (self.global_QN[:,[0,1]]==[n,l]).all(axis=1) )]\n\n \n self.QN_nl = np.unique( self.QN_nllL[:,[0,1]],axis=0)\n self.QN_nll = np.unique( self.QN_nllL[:,[0,1,2]],axis=0)\n self.QN_L = np.unique( self.QN_nllL[:,[3]],axis=0)\n\n\n\n def _R_fun(self,n,l,r):\n\n r = np.tile(r,(1,1))\n\n C_vec = np.array(self.global_C[n-1][l])[:,None]\n 
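# Each summand below has the Slater-type-orbital form N * (r/a0)^(n-1) * exp(-Z*r/a0),\n        # with normalization N = (2*Z)^(n+1/2) / sqrt((2n)!) in units of the Bohr radius a0.\n        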
Z_vec = np.array(self.global_Z[l])[:,None]\n n_vec = np.array(self.global_n_list[l])[:,None]\n factorize_vec = ssp.factorial(2*n_vec)\n factor_vec = np.power(2*Z_vec, n_vec + 0.5) / np.sqrt(factorize_vec)\n \n rw = np.sum( np.power(a0,-3./2.) * C_vec * factor_vec * np.power(r/a0, n_vec-1.)\\\n * np.exp(-Z_vec * r/a0) ,axis=0)\n return rw\n\n\n\n def calculate_initial_rwf(self):\n\n size = len(self.QN_nl)\n R_nlr_Table = np.ndarray(( size , self.Nr ))\n for it in range(size): \n n,l = self.QN_nl[it]\n R_nlr_Table[it] = self._R_fun(n,l,self.r_grid)\n\n self.R_nlr_Table = R_nlr_Table\n \n\n \n def _R_final_fun(self,n,l,lPrime,kPrime,r):\n\n Z_eff = self.global_Z_eff\n # notice it is a complex function\n hyp1f1 = np.vectorize(mp.hyp1f1)\n fac = 1 / np.math.factorial(2*lPrime+1)\n if fac == 0:\n result = 0.\n flag = 0\n else:\n hyp1f1_table = hyp1f1(lPrime+1+1j*Z_eff[n-1][l]/kPrime/a0,(2*lPrime+2),2j*kPrime*r ) \n part1 = lPrime*np.log(2*kPrime*r) \n part2 = rv2log(hyp1f1_table)\n part3 = -1J*kPrime*r\n part = rv2real(rv2exp(part1+part2+part3)) # It is to avoid divergence\n \n '''\n result = 4 * np.pi * (2*kPrime*r)**lPrime\\\n * np.exp(np.pi * Z_eff[n-1][l] /2/kPrime/a0 \\\n + np.real(ssp.loggamma(lPrime+1-1j*Z_eff[n-1][l]/kPrime/a0)) )*fac \\\n * np.exp(-1j*kPrime*r) \\\n * hyp1f1(lPrime+1+1j*Z_eff[n-1][l]/kPrime/a0,(2*lPrime+2),2j*kPrime*r )\n '''\n result = 4 * np.pi * part\\\n * np.exp(np.pi * Z_eff[n-1][l] /2/kPrime/a0 \\\n + np.real(ssp.loggamma(lPrime+1-1j*Z_eff[n-1][l]/kPrime/a0)) )*fac\n flag = 1\n \n \n return result , flag\n\n\n\n def calculate_final_rwf(self):\n \n \n kPrime_mesh, r_mesh = np.meshgrid(self.kPrime_grid, self.r_grid, indexing='ij')\n\n size = len(self.QN_nll)\n R_final_nllkr_Table = np.ndarray((size,self.Nk,self.Nr)) \n for it in range(size) : \n n,l,lPrime = self.QN_nll[it] \n val , flag = self._R_final_fun(n,l,lPrime,kPrime_mesh,r_mesh)\n R_final_nllkr_Table[it] = val\n self.R_final_nllkr_Table = R_final_nllkr_Table # constantly renew the data\n Save(self.file_name,'R_final_nllkr_Table',self.R_final_nllkr_Table,silence=True)\n print('>> Calculating R_final',it+1,'/',size,'n,l,l\\'=',(n,l,lPrime),'\\r',end='')\n \n if flag == 0:\n print('\\n>> Calculation is finished for orbit','n,l=',(n,l,lPrime),'at l\\'=',lPrime)\n break \n \n if flag == 1:\n print('\\n>> Calculation is finished for orbit','n,l=',(n,l,lPrime),'at l\\'=',lPrime) \n\n\n \n\n def calculate_spherical_Jn_Lqr_Table(self):\n\n\n q_mesh,r_mesh = np.meshgrid(self.q_grid,self.r_grid,indexing='ij')\n\n size = len(self.QN_L)\n Spherical_Jn_Lqr_Table = np.ndarray((size,self.Nq,self.Nr))\n for it in range(size): \n L = self.QN_L[it] \n Spherical_Jn_Lqr_Table[it] = ssp.spherical_jn(L ,q_mesh*r_mesh)\n \n self.Spherical_Jn_Lqr_Table = Spherical_Jn_Lqr_Table\n \n \n\n\n\n # To calculate I1(q=0,kPrime) with (n,l,lPrime=l,L=0), it is to be subtracted from I1.\n def get_I1q0(self):\n size = len(self.QN_nl)\n I1_q0 = np.ndarray((size,self.Nk))\n \n L0_index = np.where(self.QN_L ==0)[0] # find L=0 in L list\n\n for it in range(size):\n n,l = self.QN_nl[it]\n index_nl = np.where( (self.QN_nl==np.array([n,l])[None,:]).all(axis=1) )[0]\n index_nll = np.where( (self.QN_nll==np.array([n,l,l])[None,:]).all(axis=1) )[0] # find (n,l,lPrime=l)\n # (List,k,r)\n Integrand = (self.r_grid**2)[None,None,:] * self.R_nlr_Table[index_nl,None,:]\\\n * self.R_final_nllkr_Table[index_nll,:,:] * 1. 
#ssp.spherical_jn(0 ,0*r_mesh) = 1\n # (List,k) \n I1_q0[it] = 0.5*(self.rmax-self.rmin)* np.sum(self.r_weight[None,None,:]*Integrand,axis=2)\n \n self.I1_q0 = I1_q0\n \n \n\n\n '''\n Calculate Radial Integrals\n '''\n\n # I1 quantum numbers : (n,l,lPrime,L)\n # [List,k,q,r]\n def get_I1(self):\n \n size = len(self.QN_nllL)\n I1 = np.ndarray((size,self.Nk,self.Nq))\n\n for it in range(size):\n n,l,lPrime,L = self.QN_nllL[it]\n \n index_nl = np.where( (self.QN_nl[:,:]==[n,l]).all(axis=1) )[0]\n index_nll = np.where( (self.QN_nll[:,:]==[n,l,lPrime]).all(axis=1) )[0]\n index_L = np.where( (self.QN_L[:,:]==[L]).all(axis=1) )[0] \n\n Integrand = (self.r_grid**2)[None,None,None,:] * self.R_nlr_Table[index_nl,None,None,:]\\\n * self.R_final_nllkr_Table[index_nll,:,None,:] * self.Spherical_Jn_Lqr_Table[index_L,None,:,:]\n \n I1[it] = 0.5*(self.rmax-self.rmin)* np.sum(self.r_weight[None,None,None,:]*Integrand,axis=3) \n\n self.I1 = I1\n \n \n def get_I2(self):\n \n size = len(self.QN_nllL)\n I2 = np.ndarray((size,self.Nk,self.Nq))\n\n for it in range(size):\n n,l,lPrime,L = self.QN_nllL[it]\n \n index_nl = np.where( (self.QN_nl[:,:]==[n,l]).all(axis=1) )[0]\n index_nll = np.where( (self.QN_nll[:,:]==[n,l,lPrime]).all(axis=1) )[0]\n index_L = np.where( (self.QN_L[:,:]==[L]).all(axis=1) )[0] \n \n dRdr = np.diff( self.Spherical_Jn_Lqr_Table[:,:,:] , axis=2 )\n dRdr = np.dstack([dRdr,dRdr[:,:,-1][:,:,None]])\n Integrand = (self.r_grid**2)[None,None,None,:] * self.R_nlr_Table[index_nl,None,None,:]\\\n * self.R_final_nllkr_Table[index_nll,:,None,:] * dRdr[index_L,None,:,:]\n \n I2[it] = 0.5*(self.rmax-self.rmin)* np.sum(self.r_weight[None,None,None,:]*Integrand,axis=3)\n\n self.I2 = I2\n \n \n def get_I3(self):\n \n size = len(self.QN_nllL)\n I3 = np.ndarray((size,self.Nk,self.Nq))\n\n for it in range(size):\n n,l,lPrime,L = self.QN_nllL[it]\n \n index_nl = np.where( (self.QN_nl[:,:]==[n,l]).all(axis=1) )[0]\n index_nll = np.where( (self.QN_nll[:,:]==[n,l,lPrime]).all(axis=1) )[0]\n index_L = np.where( (self.QN_L[:,:]==[L]).all(axis=1) )[0] \n \n Integrand = self.r_grid[None,None,None,:] * self.R_nlr_Table[index_nl,None,None,:]\\\n * self.R_final_nllkr_Table[index_nll,:,None,:] * self.Spherical_Jn_Lqr_Table[index_L,None,:,:]\n \n I3[it] = 0.5*(self.rmax-self.rmin)* np.sum(self.r_weight[None,None,None,:]*Integrand,axis=3)\n \n\n self.I3 = I3\n \n \n '''\n Atomic Response\n '''\n\n\n def get_W1_atomic_response(self):\n \n \n '''\n size = len(self.QN_nllL)\n w1_raw = np.zeros([size,self.Nk,self.Nq])\n for i in range(size):\n n,l,lPrime,L = self.QN_nllL[i]\n coeff = (2*l+1)*(2*lPrime+1)*(2*L+1)*wigner_3j(l,lPrime,L,0,0,0)**2\n w1_raw[i] = Integral[i] * coeff * 4 * self.kPrime_grid[None,:,None]**3 /( 2*np.pi )**2\n '''\n \n # sum up\n size0 = len(self.QN_nl)\n W1 = np.zeros([size0,self.Nk,self.Nq])\n for i in range(size0):\n n,l = self.QN_nl[i]\n index_nllL = np.where( ( self.QN_nllL[:,[0,1]]==[n,l] ).all(axis=1) )[0]\n for j in index_nllL:\n n,l,lPrime,L = self.QN_nllL[j]\n coeff = (2*l+1)*(2*lPrime+1)*(2*L+1)*wigner_3j(l,lPrime,L,0,0,0)**2\n W1[i] = W1[i] + self.I1[j]**2 * coeff * 4 * self.kPrime_grid[:,None]**3 /( 2*np.pi )**3\n if L==0 and l==lPrime:\n correction = 4*self.kPrime_grid[:,None]**3/( 2*np.pi )**3*\\\n (2*l+1)*(self.I1_q0[i][:,None]**2 - 2*self.I1_q0[i][:,None]*self.I1[j]) \n correction0 = np.where(correction<0 , correction , 0.)\n W1[i] += correction0\n \n \n self.Atomic_Response_W1 = self.global_fac * W1 \n \n \n '''\n\n # make a general list with m and m'\n QNmm = np.empty((0,6))\n \n for 
n,l,lPrime,L in self.QN_nllL:\n for m in range(-l,l+1):\n for mPrime in range(-lPrime,lPrime+1):\n add = [n,l,lPrime,L,m,mPrime]\n QNmm = np.vstack([QNmm,add])\n\n QNmm = QNmm.astype(int)\n QNmm_Aux = np.unique(QNmm[:,:4],axis=0,return_inverse=True)[1]\n\n\n # calculating integral's coefficients \n coeff_f12 = np.empty(0)\n for n,l,lPrime,L,m,mPrime in QNmm: \n n,l,lPrime,L,m,mPrime = int(n),int(l),int(lPrime),int(L),int(m),int(mPrime) \n add = np.sqrt(4*np.pi) * 1j**L * (-1.)**mPrime * np.sqrt(2*L+1) * \\\n float( gaunt(l,lPrime,L,m,-mPrime,0) ) \n coeff_f12 = np.append(coeff_f12,add)\n raw_f12 = coeff_f12[:,None,None] * self.Integral[QNmm_Aux,:,:]\n del coeff_f12 \n\n\n\n # make a shorter list in order to sum up all the L\n QN_nllmm, QN_nllmm_Aux = np.unique( QNmm[:,[0,1,2,4,5]] , axis=0 , return_inverse=True )\n QN_nl , QN_nl_Aux = np.unique( QN_nllmm[:,[0,1]] , axis=0 , return_inverse=True )\n f12_gen = lambda index : np.sum(raw_f12[np.where(QN_nllmm_Aux==index)[0],:,:],axis=0)\n f12 = np.zeros((len(QN_nllmm) , self.Nk , self.Nq ), dtype=complex)\n\n for it in range(len(QN_nllmm)):\n f12[it] = f12_gen(it)\n del raw_f12\n\n\n # calculating w1 without summation\n raw_w1 = np.abs(f12)**2\n del f12\n \n\n # calculating W1\n size = len(QN_nl)\n Atomic_Response_W1 = np.zeros([size, self.Nk, self.Nq])\n for it in range(size):\n Atomic_Response_W1[it] = np.sum( raw_w1[ np.where(QN_nl_Aux==it)[0],:,:] ,axis=0)\\\n * 4 * (self.kPrime_grid**3)[None,:,None] / (2*np.pi)**3\n\n self.Atomic_Response_W1 = self.global_fac * Atomic_Response_W1\n '''\n \n \n \n\n\n\n\n\n\n\n def get_K_atomic_response(self):\n \n W1 = self.Atomic_Response_W1\n Ee_grid = np.sqrt( self.kPrime_grid**2 + mElectron**2 ) - mElectron\n \n counts = len(self.QN_nl)\n K = np.zeros(( counts ,self.Nk,self.Nq))\n \n for it in range(counts):\n n,l = self.QN_nl[it]\n K[it] = W1[it]*(aEM*mElectron)**2/4./mElectron/Ee_grid[:,None]\n\n self.Atomic_Response_K = K\n \n\n\n\n\n\n\n def run_all_calculations(self):\n self.save_which(0,4)\n \n self.calculate_initial_rwf()\n self.calculate_final_rwf()\n self.calculate_spherical_Jn_Lqr_Table()\n \n #self.save_which(4,6)\n \n self.get_I1q0()\n self.get_I1()\n #self.get_I2()\n #self.get_I3()\n \n #self.save_which(6,10)\n \n self.get_W1_atomic_response()\n self.get_K_atomic_response()\n \n self.save_which(10,12)\n\n\n\n\n", "repo_name": "XueXiao-Physics/AtomIonCalc", "sub_path": "methods.py", "file_name": "methods.py", "file_ext": "py", "file_size_in_byte": 16615, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "numpy.vectorize", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.real", "line_number": 26, "usage_type": "attribute"}, {"api_name": "numpy.vectorize", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.imag", "line_number": 27, "usage_type": "attribute"}, {"api_name": "numpy.vectorize", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.vectorize", "line_number": 29, "usage_type": "call"}, {"api_name": "mpmath.log", "line_number": 29, "usage_type": "attribute"}, {"api_name": "numpy.vectorize", "line_number": 30, "usage_type": "call"}, {"api_name": "mpmath.exp", "line_number": 30, "usage_type": "attribute"}, {"api_name": "h5py.File", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 118, "usage_type": 
"call"}, {"api_name": "scipy.special.p_roots", "line_number": 127, "usage_type": "call"}, {"api_name": "scipy.special", "line_number": 127, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 190, "usage_type": "call"}, {"api_name": "scipy.special.factorial", "line_number": 191, "usage_type": "call"}, {"api_name": "scipy.special", "line_number": 191, "usage_type": "name"}, {"api_name": "numpy.power", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.vectorize", "line_number": 216, "usage_type": "call"}, {"api_name": "mpmath.hyp1f1", "line_number": 216, "usage_type": "attribute"}, {"api_name": "numpy.math.factorial", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.math", "line_number": 217, "usage_type": "attribute"}, {"api_name": "numpy.log", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 235, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 236, "usage_type": "attribute"}, {"api_name": "numpy.real", "line_number": 237, "usage_type": "call"}, {"api_name": "scipy.special.loggamma", "line_number": 237, "usage_type": "call"}, {"api_name": "scipy.special", "line_number": 237, "usage_type": "name"}, {"api_name": "numpy.meshgrid", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 251, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 273, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 276, "usage_type": "call"}, {"api_name": "scipy.special.spherical_jn", "line_number": 279, "usage_type": "call"}, {"api_name": "scipy.special", "line_number": 279, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 290, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 292, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 296, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 296, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 297, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 297, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 302, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 318, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 323, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 324, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 325, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 330, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 338, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 343, 
"usage_type": "call"}, {"api_name": "numpy.where", "line_number": 344, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 345, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 347, "usage_type": "call"}, {"api_name": "numpy.dstack", "line_number": 348, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 352, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 360, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 365, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 366, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 367, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 372, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 397, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 400, "usage_type": "call"}, {"api_name": "sympy.physics.wigner.wigner_3j", "line_number": 403, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 404, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 406, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 408, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 480, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 483, "usage_type": "call"}]} +{"seq_id": "72660794683", "text": "import sys\nsys.path.append('../SearchTree')\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import RadioButtons, Button, TextBox\n\nfrom SearchTree import Node, Tree\nfrom SearchTree.PathFindingSearchTree import PathFindingSearchTree\nfrom math import dist\n\n\n\nclass PathFinding_GUI(PathFindingSearchTree):\n def __init__(self, rootNode=(0,0), path='', walkableColor = [1., 1., 1., 1.], heuristic = 'LineOfSight'):\n super().__init__(rootNode, path, walkableColor, heuristic)\n\n self.vismap = mpimg.imread(path)\n\n self.goal = None\n self.start = None\n self.searchType = 'BFS'\n\n self.fig,self.im_ax = plt.subplots(1,1)\n self.im = self.im_ax.imshow(self.vismap)\n\n self.fig.canvas.mpl_connect('button_press_event', self.GUIonclick)\n\n #promptTxt_ax = self.fig.add_axes([0.05, 0.9, 0.15, 0.15])\n self.promptTxt = plt.text(0, -10, 'Left click to set the starting point')\n\n searchType_ax = self.fig.add_axes([0.05, 0.7, 0.15, 0.15])\n searchType_radio = RadioButtons(searchType_ax, ('BFS', 'DFS', 'A*'))\n searchType_radio.on_clicked(self.changeSearchType)\n\n startBtn_ax = self.fig.add_axes([0.05, 0.3, 0.15, 0.15])\n self.startBtn = Button(startBtn_ax, \"Start!\", color=\"red\")\n self.startBtn.on_clicked(self.GUI_startBtn)\n\n resetBtn_ax = self.fig.add_axes([0.05, 0.1, 0.15, 0.15])\n resetBtn = Button(resetBtn_ax, \"Reset\")\n resetBtn.on_clicked(self.GUI_reset)\n\n plt.show()\n \n def showVisualization(self, expandedNode):\n tmp=self.vismap.copy()\n tmp[self.root.data[0]][self.root.data[1]] = [0., 0., 0.5, 1.]\n tmp[self.goal[0]][self.goal[1]] = [0., 1., 0., 1.]\n\n n=expandedNode\n while n.parent != None:\n tmp[n.data[0]][n.data[1]] = [0., 0.75, 0.75, 1.]\n n=n.parent\n self.im.set_data(tmp)\n plt.draw()\n #self.fig.canvas.draw_idle()\n plt.pause(1)\n\n def expandNode(self, node):\n if self.showVisualization:\n self.vismap[node.data[0]][node.data[1]] = [1., 0., 0., 1.]\n childList=[]\n y, x = node.data\n #up\n if y>0 and (self.map[y-1][x]==self.walkableColor).all():\n newNode = Node((y-1,x))\n newNode.parent=node\n childList.append(newNode)\n #down\n if y0 and (self.map[y][x-1]==self.walkableColor).all():\n newNode 
= Node((y,x-1))\n            newNode.parent=node\n            childList.append(newNode)\n        #right\n        if x<self.map.shape[1]-1 and (self.map[y][x+1]==self.walkableColor).all():\n            newNode = Node((y,x+1))\n            newNode.parent=node\n            childList.append(newNode)\n    @staticmethod\n    def _generate_request_session_id() -> str:\n        return secrets.token_hex()\n\n    @staticmethod\n    def _get_batched_repository_files(\n        repository_files: dict[str, bytes]\n    ) -> list[dict[str, dict[str, bytes]]]:\n        MAX_BATCH_SIZE_BYTES = 10 * 1024**2\n\n        batches = []\n        empty_batch = {\"files\": {}}\n        current_batch = copy.deepcopy(empty_batch)\n        current_batch_size_bytes = 0\n\n        for file_path, file_content in repository_files.items():\n            file_size_bytes = len(file_content)\n\n            current_batch[\"files\"][file_path] = file_content\n            current_batch_size_bytes += file_size_bytes\n\n            if current_batch_size_bytes + file_size_bytes > MAX_BATCH_SIZE_BYTES:\n                batches.append(current_batch)\n                current_batch = copy.deepcopy(empty_batch)\n                current_batch_size_bytes = 0\n\n        if current_batch != empty_batch:\n            batches.append(current_batch)\n\n        return batches\n\n    @classmethod\n    def _add_batches_request_metadata(\n        cls, batches: list[dict[str, dict[str, bytes]]]\n    ) -> list[dict[str, dict[str, bytes] | int | str]]:\n        session_id = cls._generate_request_session_id()\n\n        for i, batch in enumerate(batches):\n            batch.update(\n                {\n                    \"batch_number\": i + 1,\n                    \"num_total_batches\": len(batches),\n                    \"session_id\": session_id,\n                }\n            )\n\n        return batches\n\n    @staticmethod\n    def _make_batch_request(\n        payload: dict[str, dict[str, bytes] | int | str]\n    ) -> dict[str, str]:\n        response = requests.post(\n            url=f\"{config.INSIGHT_API_BASE_URL}/initialize_repository\",\n            cookies={\"session_id\": payload[\"session_id\"]},\n            files=payload[\"files\"],\n            data={\n                \"batch_num\": payload[\"batch_number\"],\n                \"num_total_batches\": payload[\"num_total_batches\"],\n            },\n        )\n\n        response.raise_for_status()\n\n        return response.json()\n\n    @classmethod\n    def make_request(cls, repository_files: dict[str, bytes]) -> dict[str, str]:\n        request_batches = cls._add_batches_request_metadata(\n            cls._get_batched_repository_files(repository_files)\n        )\n\n        with ThreadPoolExecutor(max_workers=len(request_batches)) as executor:\n            results = executor.map(cls._make_batch_request, request_batches)\n            return {\"repository_id\": result[\"repository_id\"] for result in results}\n", "repo_name": "ChenGrant/insight-cli", "sub_path": "insight_cli/api/initialize_repository_api.py", "file_name": "initialize_repository_api.py", "file_ext": "py", "file_size_in_byte": 2739, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "base.api.API", "line_number": 8, "usage_type": "name"}, {"api_name": "secrets.token_hex", "line_number": 11, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 21, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 32, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 61, "usage_type": "call"}, {"api_name": "insight_cli.config.INSIGHT_API_BASE_URL", "line_number": 62, "usage_type": "attribute"}, {"api_name": "insight_cli.config", "line_number": 62, "usage_type": "name"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "10407728970", "text": "#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed May 27 17:13:06 2020\r\n\r\n@author: dewiballard\r\n\"\"\"\r\nfrom kivy.app import App\r\nfrom kivy.uix.label import Label\r\nfrom kivy.uix.gridlayout import GridLayout\r\nfrom kivy.uix.floatlayout import FloatLayout\r\nfrom kivy.uix.textinput import TextInput\r\nfrom kivy.uix.button import Button\r\nfrom kivy.uix.screenmanager import 
ScreenManager, Screen\nfrom kivy.clock import Clock\nfrom kivy.lang import Builder\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.image import Image\nfrom kivy.core.window import Window\nimport re\nimport cv2\nimport os\nimport numpy as np\nimport pytesseract\n\nclass PuzzleSelector(GridLayout):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.cols = 2\n \n self.add_widget(Label(text='Sudoku solver app'))\n \n self.easy = Button(text='Easy (2x3)')\n self.add_widget(self.easy)\n self.easy.bind(on_press=self.grid_size)\n self.easy.bind(on_press=self.Grid_button)\n \n self.medium = Button(text='Medium (3x3)')\n self.add_widget(self.medium)\n self.medium.bind(on_press=self.grid_size)\n self.medium.bind(on_press=self.Grid_button)\n \n self.hard = Button(text='Hard (3x4)')\n self.add_widget(self.hard)\n self.hard.bind(on_press=self.grid_size)\n self.hard.bind(on_press=self.Grid_button)\n \n self.extreme = Button(text='Extreme (4x4)')\n self.add_widget(self.extreme)\n self.extreme.bind(on_press=self.grid_size)\n self.extreme.bind(on_press=self.Grid_button)\n \n self.impossible = Button(text='Impossible (5x5)')\n self.add_widget(self.impossible)\n self.impossible.bind(on_press=self.grid_size)\n self.impossible.bind(on_press=self.Grid_button)\n \n def grid_size(self, instance):\n global n_cols\n global n_cells\n if instance.text == 'Easy (2x3)':\n n_cols = 6\n n_cells = 36\n if instance.text == 'Medium (3x3)':\n n_cols = 9\n n_cells = 81\n if instance.text == 'Hard (3x4)':\n n_cols = 12\n n_cells = 144\n if instance.text == 'Extreme (4x4)':\n n_cols = 16\n n_cells = 256\n if instance.text == 'Impossible (5x5)':\n n_cols = 25\n n_cells = 625\n \n global listed_out\n listed_out = (np.zeros((n_cols, n_cols), dtype=np.uint8)).flatten()\n \n def Grid_button(self, instance):\n info = f'Attempting to configure your grid, please enter your known values...'\n PuzzleApp.info_page.update_info(info)\n PuzzleApp.screen_manager.current = 'Info'\n Clock.schedule_once(self.grid, 2)\n \n def grid(self, _): \n PuzzleApp.create_grid()\n PuzzleApp.screen_manager.current = 'Grid'\n\nclass InfoPage(GridLayout):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.cols = 1\n self.message = Label(halign='center', valign='middle', font_size=60)\n self.message.bind(width=self.update_text_width)\n self.add_widget(self.message)\n \n def update_info(self, message):\n self.message.text = message\n \n def update_text_width(self, *_):\n self.message.text_size = (self.message.width*0.9, None)\n\nclass GridPage(GridLayout):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n number_of_columns = n_cols\n number_of_cells = n_cells\n self.cols = number_of_columns\n \n global my_dict\n my_dict = {}\n for i in range(number_of_cells):\n self.cells = FloatInput(multiline=False, font_size=35)\n self.add_widget(self.cells)\n a = str('cell'+str(i+1))\n my_dict[a] = self.cells\n \n self.image_it = Button(text='Take image', size_hint = (1, 2))\n self.add_widget(self.image_it)\n self.image_it.bind(on_press=self.Camera_button) \n \n for i in range(number_of_columns-2):\n self.name = Label(text='')\n self.add_widget(self.name)\n \n self.solve_it = Button(text='Solve it', size_hint = (1, 2))\n self.add_widget(self.solve_it)\n self.solve_it.bind(on_press=self.Results_button)\n \n def Camera_button(self, instance):\n notice = f'Opening camera'\n PuzzleApp.notice_page.update_info(notice)\n PuzzleApp.screen_manager.current = 'Notice'\n Clock.schedule_once(self.camera, 1.5)\n \n def camera(self, 
_): \r\n        PuzzleApp.camera_window()\r\n        PuzzleApp.screen_manager.current = 'Camera'\r\n    \r\n    def Results_button(self, instance):\r\n        message = f'Solving grid, please wait...'\r\n        PuzzleApp.message_page.update_info(message)\r\n        PuzzleApp.screen_manager.current = 'Message'\r\n        global my_dict2\r\n        my_dict2 = {}\r\n        for item in my_dict:\r\n            if my_dict[item].text == '':\r\n                my_dict2[item] = 0\r\n            else:\r\n                my_dict2[item] = my_dict[item].text\r\n        print(my_dict2)\r\n        Clock.schedule_once(self.results, 3)\r\n    \r\n    def results(self, _): \r\n        PuzzleApp.results_grid()\r\n        PuzzleApp.screen_manager.current = 'Results'\r\n\r\n\r\nclass GridPageTwo(GridLayout):\r\n    def __init__(self, **kwargs):\r\n        super().__init__(**kwargs)\r\n        number_of_columns = n_cols\r\n        number_of_cells = n_cells\r\n        self.cols = number_of_columns\r\n        \r\n        global my_dict\r\n        my_dict = {}\r\n        for i in range(number_of_cells):\r\n            print(listed_out[i])\r\n            if int(listed_out[i]) != 0:\r\n                self.cells = FloatInput(text = str(listed_out[i]), multiline=False, font_size=35)\r\n                self.add_widget(self.cells)\r\n                a = str('cell'+str(i+1))\r\n                my_dict[a] = self.cells\r\n            else:\r\n                self.cells = FloatInput(multiline=False, font_size=35)\r\n                self.add_widget(self.cells)\r\n                a = str('cell'+str(i+1))\r\n                my_dict[a] = self.cells\r\n\r\n        self.image_it = Button(text='Take image', size_hint = (1, 2))\r\n        self.add_widget(self.image_it)\r\n        self.image_it.bind(on_press=self.Camera_button) \r\n        \r\n        for i in range(number_of_columns-2):\r\n            self.name = Label(text='')\r\n            self.add_widget(self.name)\r\n        \r\n        self.solve_it = Button(text='Solve it', size_hint = (1, 2))\r\n        self.add_widget(self.solve_it)\r\n        self.solve_it.bind(on_press=self.Results_button)\r\n    \r\n    def Camera_button(self, instance):\r\n        notice = f'Opening camera'\r\n        PuzzleApp.notice_page.update_info(notice)\r\n        PuzzleApp.screen_manager.current = 'Notice'\r\n        Clock.schedule_once(self.camera, 1.5)\r\n    \r\n    def camera(self, _): \r\n        PuzzleApp.camera_window()\r\n        PuzzleApp.screen_manager.current = 'Camera'\r\n    \r\n    def Results_button(self, instance):\r\n        message = f'Solving grid, please wait...'\r\n        PuzzleApp.message_page.update_info(message)\r\n        PuzzleApp.screen_manager.current = 'Message'\r\n        global my_dict2\r\n        my_dict2 = {}\r\n        for item in my_dict:\r\n            if my_dict[item].text == '':\r\n                my_dict2[item] = 0\r\n            else:\r\n                my_dict2[item] = my_dict[item].text\r\n        print(my_dict2)\r\n        Clock.schedule_once(self.results, 3)\r\n    \r\n    def results(self, _): \r\n        PuzzleApp.results_grid()\r\n        PuzzleApp.screen_manager.current = 'Results'\r\n\r\nclass FloatInput(TextInput):\r\n    pat = re.compile('[^0-9]')\r\n    def insert_text(self, substring, from_undo=False):\r\n        pat = self.pat\r\n        if '[^0-9]' in self.text:\r\n            s = re.sub(pat, '', substring)\r\n        else:\r\n            s = '[^0-9]'.join([re.sub(pat, '', s) for s in substring.split('[^0-9]', 1)])\r\n        return super(FloatInput, self).insert_text(s, from_undo=from_undo)\r\n\r\nclass NoticePage(GridLayout):\r\n    def __init__(self, **kwargs):\r\n        super().__init__(**kwargs)\r\n        self.cols = 1\r\n        self.message = Label(halign='center', valign='middle', font_size=60)\r\n        self.message.bind(width=self.update_text_width)\r\n        self.add_widget(self.message)\r\n    \r\n    def update_info(self, message):\r\n        self.message.text = message\r\n    \r\n    def update_text_width(self, *_):\r\n        self.message.text_size = (self.message.width*0.9, None)\r\n\r\nBuilder.load_string('''\r\n<CameraPage>:\r\n    orientation: 'vertical'\r\n    Camera:\r\n        size_hint: 1, 1\r\n        id: camera\r\n        resolution: (1280, 720)\r\n        keep_ratio: False\r\n        allow_stretch: True\r\n        pos_hint: {\"center_x\":0.5, \"center_y\":0.5}\r\n        size_hint_y: 0.65\r\n        size_hint_x: 1\r\n        play: True\r\n    ToggleButton:\r\n        text: 'Take / Retake'\r\n        on_press: camera.play = not camera.play\r\n        size_hint_y: None\r\n        height: '48dp'\r\n    
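# 'Use image' below hands the frozen frame to root.capture() for preprocessing.\r\n    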
Button:\n text: 'Use image'\n size_hint_y: None\n height: '48dp'\n on_press: root.capture()\n''') #on_press: go to grid page, with the numbers read filled in...\n \nclass CameraPage(BoxLayout):\n def capture(self):\n camera = self.ids['camera']\n camera.export_to_png(\"puzzle_img.png\")\n img = cv2.imread(\"puzzle_img.png\")\n os.remove(\"puzzle_img.png\")\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img_clean = cv2.GaussianBlur(img_gray, (5, 5), 0)\n img_resize = cv2.resize(img_clean, (1080, 1080))\n global thresh\n ret, thresh = cv2.threshold(img_resize, 5, 255, cv2.THRESH_OTSU)\n cv2.imwrite(\"./output_image.png\", thresh)\n PuzzleApp.corner_window()\n PuzzleApp.screen_manager.current = 'Corner Selector'\n\nclass CornerPage(FloatLayout):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n size = Window.size\n self.img = Image(source ='output_image.png', keep_ratio=False, allow_stretch=True) \n self.add_widget(self.img)\n self.use_img = Button(text='Use image', size_hint = (0.2, 0.1))\n self.use_img.pos = (size[0]*0.8, size[1]*0.05)\n self.add_widget(self.use_img)\n self.use_img.bind(on_press=self.read_image)\n self.topleft = Image(source ='red_circle.png', size_hint = (0.02, 0.02))\n self.topleft.pos = (size[0]*0.2, size[1]*0.8)\n self.add_widget(self.topleft)\n self.topright = Image(source ='red_circle.png', size_hint = (0.02, 0.02)) \n self.topright.pos = (size[0]*0.8, size[1]*0.8)\n self.add_widget(self.topright)\n self.bottomleft = Image(source ='red_circle.png', size_hint = (0.02, 0.02)) \n self.bottomleft.pos = (size[0]*0.2, size[1]*0.2)\n self.add_widget(self.bottomleft)\n self.bottomright = Image(source ='red_circle.png', size_hint = (0.02, 0.02)) \n self.bottomright.pos = (size[0]*0.8, size[1]*0.2)\n self.add_widget(self.bottomright)\n\n def on_touch_move(self,touch):\n t_x,t_y = touch.pos\n tl_x,tl_y = self.topleft.pos\n tr_x,tr_y = self.topright.pos\n bl_x,bl_y = self.bottomleft.pos\n br_x,br_y = self.bottomright.pos\n if tl_x-100 < t_x < tl_x+100 and tl_y-100 < t_y < tl_y+100:\n self.topleft.pos = (t_x, t_y)\n if tr_x-100 < t_x < tr_x+100 and tr_y-100 < t_y < tr_y+100:\n self.topright.pos = (t_x, t_y)\n if bl_x-100 < t_x < bl_x+100 and bl_y-100 < t_y < bl_y+100:\n self.bottomleft.pos = (t_x, t_y)\n if br_x-100 < t_x < br_x+100 and br_y-100 < t_y < br_y+100:\n self.bottomright.pos = (t_x, t_y)\n \n def read_image(self, instance):\n size = Window.size\n top_left_coords = 1080*self.topleft.pos[0]/size[0], 1080 - 1080*self.topleft.pos[1]/size[1]\n top_right_coords = 1080*self.topright.pos[0]/size[0], 1080 - 1080*self.topright.pos[1]/size[1]\n bottom_left_coords = 1080*self.bottomleft.pos[0]/size[0], 1080 - 1080*self.bottomleft.pos[1]/size[1] \n bottom_right_coords = 1080*self.bottomright.pos[0]/size[0], 1080 - 1080*self.bottomright.pos[1]/size[1]\n \n src = np.float32([top_left_coords,\n top_right_coords,\n bottom_left_coords,\n bottom_right_coords])\n\n dst = np.float32([(0, 0),\n (1080, 0),\n (0, 1080),\n (1080, 1080)])\n \n self.unwarp(thresh, src, dst, True)\n \n def unwarp(self, img, src, dst, testing):\n h, w = img.shape[:2]\n # use cv2.getPerspectiveTransform() to get M, the transform matrix\n M = cv2.getPerspectiveTransform(src, dst)\n # use cv2.warpPerspective() to warp image to correct angle\n warped = cv2.warpPerspective(img, M, (w, h), flags=cv2.INTER_LINEAR)\n slicer = int(1080/n_cols)\n margin = int((1080/n_cols)*0.083)\n out = np.zeros((n_cols, n_cols), dtype=np.uint8)\n for x in range(n_cols):\n for y in range(n_cols):\n num = 
pytesseract.image_to_string(warped[margin + x*slicer:(x+1)*slicer - margin, margin + y*slicer:(y+1)*slicer - margin], lang ='eng', config='--psm 8 --oem 1 -c tessedit_char_whitelist=0123456789')\n if num:\n out[x, y] = num\n print(out)\n global listed_out; listed_out = list(out.flatten())\n Clock.schedule_once(self.grid_two, 0.5)\n\n def grid_two(self, _): \n PuzzleApp.create_grid_two()\n PuzzleApp.screen_manager.current = 'GridTwo'\n \nclass MessagePage(GridLayout):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.cols = 1\n self.message = Label(halign='center', valign='middle', font_size=60)\n self.message.bind(width=self.update_text_width)\n self.add_widget(self.message)\n \n def update_info(self, message):\n self.message.text = message\n \n def update_text_width(self, *_):\n self.message.text_size = (self.message.width*0.9, None)\n \nclass ResultsPage(GridLayout):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n number_of_columns = n_cols\n number_of_cells = n_cells\n self.cols = number_of_columns\n \n for i in range(number_of_cells):\n self.name = Label(text='input', font_size = 35)\n self.add_widget(self.name)\n \nclass SudokuSolverApp(App):\n def build(self):\n self.screen_manager = ScreenManager()\n \n self.puzzle_selector = PuzzleSelector() \n screen = Screen(name='Connect')\n screen.add_widget(self.puzzle_selector)\n self.screen_manager.add_widget(screen)\n \n self.info_page = InfoPage()\n screen = Screen(name='Info')\n screen.add_widget(self.info_page)\n self.screen_manager.add_widget(screen)\n \n self.notice_page = NoticePage()\n screen = Screen(name='Notice')\n screen.add_widget(self.notice_page)\n self.screen_manager.add_widget(screen)\n \n self.message_page = MessagePage()\n screen = Screen(name='Message')\n screen.add_widget(self.message_page)\n self.screen_manager.add_widget(screen)\n \n return self.screen_manager\n\n def create_grid(self):\n self.grid_page = GridPage()\n screen = Screen(name='Grid')\n screen.add_widget(self.grid_page)\n self.screen_manager.add_widget(screen) \n \n def create_grid_two(self):\n self.grid_page_two = GridPageTwo()\n screen = Screen(name='GridTwo')\n screen.add_widget(self.grid_page_two)\n self.screen_manager.add_widget(screen) \n\n def camera_window(self):\n self.camera_page = CameraPage()\n screen = Screen(name='Camera')\n screen.add_widget(self.camera_page)\n self.screen_manager.add_widget(screen) \n \n def corner_window(self):\n self.corner_page = CornerPage()\n screen = Screen(name='Corner Selector')\n screen.add_widget(self.corner_page)\n self.screen_manager.add_widget(screen) \n \n def results_grid(self):\n self.results_page = ResultsPage()\n screen = Screen(name='Results')\n screen.add_widget(self.results_page)\n self.screen_manager.add_widget(screen) \n \nif __name__ == '__main__':\n PuzzleApp = SudokuSolverApp()\n PuzzleApp.run()\n ", "repo_name": "dewiballard/Sudoku_app", "sub_path": "SudokuSolverAPP.py", "file_name": "SudokuSolverAPP.py", "file_ext": "py", "file_size_in_byte": 16444, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "kivy.uix.gridlayout.GridLayout", "line_number": 26, "usage_type": "name"}, {"api_name": "kivy.uix.label.Label", "line_number": 31, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 33, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 38, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 43, "usage_type": 
"call"}, {"api_name": "kivy.uix.button.Button", "line_number": 48, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 78, "usage_type": "attribute"}, {"api_name": "kivy.clock.Clock.schedule_once", "line_number": 84, "usage_type": "call"}, {"api_name": "kivy.clock.Clock", "line_number": 84, "usage_type": "name"}, {"api_name": "kivy.uix.gridlayout.GridLayout", "line_number": 90, "usage_type": "name"}, {"api_name": "kivy.uix.label.Label", "line_number": 94, "usage_type": "call"}, {"api_name": "kivy.uix.gridlayout.GridLayout", "line_number": 104, "usage_type": "name"}, {"api_name": "kivy.uix.button.Button", "line_number": 119, "usage_type": "call"}, {"api_name": "kivy.uix.label.Label", "line_number": 124, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 127, "usage_type": "call"}, {"api_name": "kivy.clock.Clock.schedule_once", "line_number": 135, "usage_type": "call"}, {"api_name": "kivy.clock.Clock", "line_number": 135, "usage_type": "name"}, {"api_name": "kivy.clock.Clock.schedule_once", "line_number": 153, "usage_type": "call"}, {"api_name": "kivy.clock.Clock", "line_number": 153, "usage_type": "name"}, {"api_name": "kivy.uix.gridlayout.GridLayout", "line_number": 160, "usage_type": "name"}, {"api_name": "kivy.uix.button.Button", "line_number": 182, "usage_type": "call"}, {"api_name": "kivy.uix.label.Label", "line_number": 187, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 190, "usage_type": "call"}, {"api_name": "kivy.clock.Clock.schedule_once", "line_number": 198, "usage_type": "call"}, {"api_name": "kivy.clock.Clock", "line_number": 198, "usage_type": "name"}, {"api_name": "kivy.clock.Clock.schedule_once", "line_number": 216, "usage_type": "call"}, {"api_name": "kivy.clock.Clock", "line_number": 216, "usage_type": "name"}, {"api_name": "kivy.uix.textinput.TextInput", "line_number": 222, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 223, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 227, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 229, "usage_type": "call"}, {"api_name": "kivy.uix.gridlayout.GridLayout", "line_number": 232, "usage_type": "name"}, {"api_name": "kivy.uix.label.Label", "line_number": 236, "usage_type": "call"}, {"api_name": "kivy.lang.Builder.load_string", "line_number": 246, "usage_type": "call"}, {"api_name": "kivy.lang.Builder", "line_number": 246, "usage_type": "name"}, {"api_name": "kivy.uix.boxlayout.BoxLayout", "line_number": 271, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 275, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 276, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 277, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 277, "usage_type": "attribute"}, {"api_name": "cv2.GaussianBlur", "line_number": 278, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 279, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 281, "usage_type": "call"}, {"api_name": "cv2.THRESH_OTSU", "line_number": 281, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 282, "usage_type": "call"}, {"api_name": "kivy.uix.floatlayout.FloatLayout", "line_number": 286, "usage_type": "name"}, {"api_name": "kivy.core.window.Window.size", "line_number": 289, "usage_type": "attribute"}, 
{"api_name": "kivy.core.window.Window", "line_number": 289, "usage_type": "name"}, {"api_name": "kivy.uix.image.Image", "line_number": 290, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 292, "usage_type": "call"}, {"api_name": "kivy.uix.image.Image", "line_number": 296, "usage_type": "call"}, {"api_name": "kivy.uix.image.Image", "line_number": 299, "usage_type": "call"}, {"api_name": "kivy.uix.image.Image", "line_number": 302, "usage_type": "call"}, {"api_name": "kivy.uix.image.Image", "line_number": 305, "usage_type": "call"}, {"api_name": "kivy.core.window.Window.size", "line_number": 325, "usage_type": "attribute"}, {"api_name": "kivy.core.window.Window", "line_number": 325, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 331, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 336, "usage_type": "call"}, {"api_name": "cv2.getPerspectiveTransform", "line_number": 346, "usage_type": "call"}, {"api_name": "cv2.warpPerspective", "line_number": 348, "usage_type": "call"}, {"api_name": "cv2.INTER_LINEAR", "line_number": 348, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 351, "usage_type": "attribute"}, {"api_name": "pytesseract.image_to_string", "line_number": 354, "usage_type": "call"}, {"api_name": "kivy.clock.Clock.schedule_once", "line_number": 359, "usage_type": "call"}, {"api_name": "kivy.clock.Clock", "line_number": 359, "usage_type": "name"}, {"api_name": "kivy.uix.gridlayout.GridLayout", "line_number": 365, "usage_type": "name"}, {"api_name": "kivy.uix.label.Label", "line_number": 369, "usage_type": "call"}, {"api_name": "kivy.uix.gridlayout.GridLayout", "line_number": 379, "usage_type": "name"}, {"api_name": "kivy.uix.label.Label", "line_number": 387, "usage_type": "call"}, {"api_name": "kivy.app.App", "line_number": 390, "usage_type": "name"}, {"api_name": "kivy.uix.screenmanager.ScreenManager", "line_number": 392, "usage_type": "call"}, {"api_name": "kivy.uix.screenmanager.Screen", "line_number": 395, "usage_type": "call"}, {"api_name": "kivy.uix.screenmanager.Screen", "line_number": 400, "usage_type": "call"}, {"api_name": "kivy.uix.screenmanager.Screen", "line_number": 405, "usage_type": "call"}, {"api_name": "kivy.uix.screenmanager.Screen", "line_number": 410, "usage_type": "call"}, {"api_name": "kivy.uix.screenmanager.Screen", "line_number": 418, "usage_type": "call"}, {"api_name": "kivy.uix.screenmanager.Screen", "line_number": 424, "usage_type": "call"}, {"api_name": "kivy.uix.screenmanager.Screen", "line_number": 430, "usage_type": "call"}, {"api_name": "kivy.uix.screenmanager.Screen", "line_number": 436, "usage_type": "call"}, {"api_name": "kivy.uix.screenmanager.Screen", "line_number": 442, "usage_type": "call"}]} +{"seq_id": "71710035295", "text": "import functools, BigWorld\nfrom arena_component_system.sector_base_arena_component import ID_TO_BASENAME\nfrom debug_utils import LOG_ERROR\nfrom gui.Scaleform.locale.EPIC_BATTLE import EPIC_BATTLE\nfrom gui.battle_control.arena_info.arena_vos import EPIC_BATTLE_KEYS\nfrom gui.shared import g_eventBus, EVENT_BUS_SCOPE\nfrom gui.shared.events import MessengerEvent\nfrom helpers import dependency\nfrom helpers import i18n\nfrom messenger.ext import isBattleChatEnabled\nfrom messenger.ext.player_helpers import isCurrentPlayer\nfrom messenger.formatters import chat_message\nfrom messenger.formatters.users_messages import getBroadcastIsInCoolDownMessage\nfrom 
messenger.gui.Scaleform.channels.layout import BattleLayout\nfrom messenger.m_constants import CLIENT_ERROR_ID\nfrom messenger.m_constants import PROTO_TYPE, MESSENGER_COMMAND_TYPE\nfrom messenger.proto import proto_getter\nfrom messenger.proto.events import g_messengerEvents\nfrom messenger.proto.shared_errors import ClientError\nfrom messenger_common_chat2 import MESSENGER_LIMITS\nfrom skeletons.gui.battle_session import IBattleSessionProvider\nfrom soft_exception import SoftException\n\nclass _check_arena_in_waiting(object):\n sessionProvider = dependency.descriptor(IBattleSessionProvider)\n\n def __call__(self, func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n if not self.sessionProvider.arenaVisitor.isArenaInWaiting():\n func(*args, **kwargs)\n else:\n g_messengerEvents.onErrorReceived(ClientError(CLIENT_ERROR_ID.WAITING_BEFORE_START))\n\n return wrapper\n\n\nclass _ChannelController(BattleLayout):\n sessionProvider = dependency.descriptor(IBattleSessionProvider)\n\n def __init__(self, channel, messageBuilder, isSecondaryChannelCtrl=False):\n super(_ChannelController, self).__init__(channel, messageBuilder, isSecondaryChannelCtrl)\n self.activate()\n\n @proto_getter(PROTO_TYPE.BW_CHAT2)\n def proto(self):\n return\n\n def getSettings(self):\n return self._channel.getProtoData().settings\n\n def filterMessage(self, cmd):\n return True\n\n def clear(self):\n if not self._isSecondaryChannelCtrl:\n self._channel.setJoined(False)\n super(_ChannelController, self).clear()\n\n def activate(self):\n g_eventBus.handleEvent(MessengerEvent(MessengerEvent.BATTLE_CHANNEL_CTRL_INITED, {'controller': self}), scope=EVENT_BUS_SCOPE.BATTLE)\n\n def canSendMessage(self):\n if not self.isEnabled():\n return (False, '')\n if self.proto.arenaChat.isBroadcastInCooldown():\n return (False,\n getBroadcastIsInCoolDownMessage(MESSENGER_LIMITS.BROADCASTS_FROM_CLIENT_COOLDOWN_SEC))\n return (True, '')\n\n def _formatMessage(self, message, doFormatting=True):\n avatarSessionID = message.avatarSessionID\n isCurrent = isCurrentPlayer(avatarSessionID)\n if not doFormatting:\n return (isCurrent, message.text)\n return (isCurrent,\n self._mBuilder.setColors(avatarSessionID).setName(avatarSessionID, message.accountName).setText(message.text).build())\n\n def _formatCommand(self, command):\n raise SoftException('This method should not be reached in this context')\n\n\nclass TeamChannelController(_ChannelController):\n\n def __init__(self, channel):\n super(TeamChannelController, self).__init__(channel, chat_message.TeamMessageBuilder())\n\n @_check_arena_in_waiting()\n def sendCommand(self, command):\n self.proto.battleCmd.send(command)\n\n def filterMessage(self, cmd):\n arenaDP = self.sessionProvider.getArenaDP()\n if arenaDP is None:\n return True\n else:\n senderVehicleID = arenaDP.getVehIDBySessionID(cmd.getSenderID())\n return not arenaDP.isPlayerObserver() or senderVehicleID == BigWorld.player().playerVehicleID or arenaDP.isAlly(senderVehicleID)\n\n @_check_arena_in_waiting()\n def _broadcast(self, message):\n self.proto.arenaChat.broadcast(message, 0)\n\n def isEnabled(self):\n result = super(TeamChannelController, self).isEnabled()\n arenaDP = self.sessionProvider.getArenaDP()\n arenaVisitor = self.sessionProvider.arenaVisitor\n hasAnyTeammates = arenaDP.getAlliesVehiclesNumber() > 1\n isObserver = arenaDP.isPlayerObserver()\n isBattleRoyale = arenaVisitor.gui.isBattleRoyale()\n return result and (hasAnyTeammates or isObserver) and not isBattleRoyale\n\n def _formatCommand(self, command):\n 
isCurrent = False\n if command.getCommandType() == MESSENGER_COMMAND_TYPE.BATTLE:\n avatarSessionID = command.getSenderID()\n isCurrent = command.isSender()\n text = self._mBuilder.setColors(avatarSessionID).setName(avatarSessionID).setText(command.getCommandText()).build()\n else:\n text = command.getCommandText()\n return (isCurrent, text)\n\n\n_EPIC_MINIMAP_ZOOM_MODE_SCALE = 500\n_NONCAPTURED_BASES_FOR_LANE_DICT = {1: {1: 4, 2: 1}, 2: {1: 5, 2: 2}, 3: {1: 6, 2: 3}}\n\nclass EpicTeamChannelController(TeamChannelController):\n\n def __getNameSuffix(self, avatarSessionID):\n suffix = ''\n componentSystem = self.sessionProvider.arenaVisitor.getComponentSystem()\n sectorBaseComp = getattr(componentSystem, 'sectorBaseComponent', None)\n if sectorBaseComp is None:\n LOG_ERROR('Expected SectorBaseComponent not present!')\n return suffix\n else:\n destructibleEntityComp = getattr(componentSystem, 'destructibleEntityComponent', None)\n if destructibleEntityComp is None:\n LOG_ERROR('Expected DestructibleEntityComponent not present!')\n return suffix\n senderVID = self.sessionProvider.getCtx().getVehIDBySessionID(avatarSessionID)\n adp = self.sessionProvider.getArenaDP()\n vo = adp.getVehicleStats(senderVID)\n sectorID = vo.gameModeSpecific.getValue(EPIC_BATTLE_KEYS.PHYSICAL_SECTOR)\n lane = vo.gameModeSpecific.getValue(EPIC_BATTLE_KEYS.PLAYER_GROUP)\n hqActive = False\n hqs = destructibleEntityComp.destructibleEntities\n if hqs:\n hqActive = hqs[hqs.keys()[0]].isActive\n nonCapturedBases = sectorBaseComp.getNumNonCapturedBasesByLane(lane)\n if 0 < sectorID < 7 and 0 < lane < 4:\n suffix = '<' + i18n.makeString(EPIC_BATTLE.ZONE_ZONE_TEXT) + ' ' + ID_TO_BASENAME[sectorID] + '>'\n elif nonCapturedBases == 0 or hqActive and sectorID > 6:\n suffix = '<' + i18n.makeString(EPIC_BATTLE.ZONE_HEADQUARTERS_TEXT) + '>'\n return suffix\n\n def _formatMessage(self, message, doFormatting=True):\n avatarSessionID = message.avatarSessionID\n isCurrent = isCurrentPlayer(avatarSessionID)\n if not doFormatting:\n return (isCurrent, message.text)\n suffix = self.__getNameSuffix(avatarSessionID)\n return (\n isCurrent,\n self._mBuilder.setColors(avatarSessionID).setName(avatarSessionID, message.accountName, suffix=suffix).setText(message.text).build())\n\n def _formatCommand(self, command):\n isCurrent = False\n if command.getCommandType() == MESSENGER_COMMAND_TYPE.BATTLE:\n avatarSessionID = command.getSenderID()\n isCurrent = command.isSender()\n suffix = self.__getNameSuffix(avatarSessionID)\n text = self._mBuilder.setColors(avatarSessionID).setName(avatarSessionID, suffix=suffix).setText(command.getCommandText()).build()\n else:\n text = command.getCommandText()\n return (isCurrent, text)\n\n\nclass CommonChannelController(_ChannelController):\n\n def __init__(self, channel):\n super(CommonChannelController, self).__init__(channel, chat_message.CommonMessageBuilder())\n\n def isEnabled(self):\n return isBattleChatEnabled(True)\n\n @_check_arena_in_waiting()\n def _broadcast(self, message):\n self.proto.arenaChat.broadcast(message, 1)\n\n\nclass SquadChannelController(_ChannelController):\n\n def __init__(self, channel):\n super(SquadChannelController, self).__init__(channel, chat_message.SquadMessageBuilder(), True)\n\n def isEnabled(self):\n return self.proto.unitChat.isInited()\n\n def setView(self, view):\n super(SquadChannelController, self).setView(view)\n self.proto.unitChat.addHistory()\n\n def canSendMessage(self):\n if not self.isEnabled():\n return (False, '')\n if 
self.proto.unitChat.isBroadcastInCooldown():\n return (False,\n getBroadcastIsInCoolDownMessage(MESSENGER_LIMITS.BROADCASTS_FROM_CLIENT_COOLDOWN_SEC))\n return (True, '')\n\n def _broadcast(self, message):\n self.proto.unitChat.broadcast(message, 1)", "repo_name": "IzeBerg/wot-src", "sub_path": "sources/res/scripts/client/messenger/gui/Scaleform/channels/bw_chat2/battle_controllers.py", "file_name": "battle_controllers.py", "file_ext": "py", "file_size_in_byte": 8864, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 19, "dataset": "github-code", "pt": "33", "api": [{"api_name": "helpers.dependency.descriptor", "line_number": 25, "usage_type": "call"}, {"api_name": "skeletons.gui.battle_session.IBattleSessionProvider", "line_number": 25, "usage_type": "argument"}, {"api_name": "helpers.dependency", "line_number": 25, "usage_type": "name"}, {"api_name": "messenger.proto.events.g_messengerEvents.onErrorReceived", "line_number": 34, "usage_type": "call"}, {"api_name": "messenger.proto.events.g_messengerEvents", "line_number": 34, "usage_type": "name"}, {"api_name": "messenger.proto.shared_errors.ClientError", "line_number": 34, "usage_type": "call"}, {"api_name": "messenger.m_constants.CLIENT_ERROR_ID.WAITING_BEFORE_START", "line_number": 34, "usage_type": "attribute"}, {"api_name": "messenger.m_constants.CLIENT_ERROR_ID", "line_number": 34, "usage_type": "name"}, {"api_name": "functools.wraps", "line_number": 29, "usage_type": "call"}, {"api_name": "messenger.gui.Scaleform.channels.layout.BattleLayout", "line_number": 39, "usage_type": "name"}, {"api_name": "helpers.dependency.descriptor", "line_number": 40, "usage_type": "call"}, {"api_name": "skeletons.gui.battle_session.IBattleSessionProvider", "line_number": 40, "usage_type": "argument"}, {"api_name": "helpers.dependency", "line_number": 40, "usage_type": "name"}, {"api_name": "messenger.proto.proto_getter", "line_number": 46, "usage_type": "call"}, {"api_name": "messenger.m_constants.PROTO_TYPE.BW_CHAT2", "line_number": 46, "usage_type": "attribute"}, {"api_name": "messenger.m_constants.PROTO_TYPE", "line_number": 46, "usage_type": "name"}, {"api_name": "gui.shared.g_eventBus.handleEvent", "line_number": 62, "usage_type": "call"}, {"api_name": "gui.shared.g_eventBus", "line_number": 62, "usage_type": "name"}, {"api_name": "gui.shared.events.MessengerEvent", "line_number": 62, "usage_type": "call"}, {"api_name": "gui.shared.events.MessengerEvent.BATTLE_CHANNEL_CTRL_INITED", "line_number": 62, "usage_type": "attribute"}, {"api_name": "gui.shared.EVENT_BUS_SCOPE.BATTLE", "line_number": 62, "usage_type": "attribute"}, {"api_name": "gui.shared.EVENT_BUS_SCOPE", "line_number": 62, "usage_type": "name"}, {"api_name": "messenger.formatters.users_messages.getBroadcastIsInCoolDownMessage", "line_number": 69, "usage_type": "call"}, {"api_name": "messenger_common_chat2.MESSENGER_LIMITS.BROADCASTS_FROM_CLIENT_COOLDOWN_SEC", "line_number": 69, "usage_type": "attribute"}, {"api_name": "messenger_common_chat2.MESSENGER_LIMITS", "line_number": 69, "usage_type": "name"}, {"api_name": "messenger.ext.player_helpers.isCurrentPlayer", "line_number": 74, "usage_type": "call"}, {"api_name": "soft_exception.SoftException", "line_number": 81, "usage_type": "call"}, {"api_name": "messenger.formatters.chat_message.TeamMessageBuilder", "line_number": 87, "usage_type": "call"}, {"api_name": "messenger.formatters.chat_message", "line_number": 87, "usage_type": "name"}, {"api_name": "BigWorld.player", "line_number": 99, "usage_type": "call"}, 
{"api_name": "messenger.m_constants.MESSENGER_COMMAND_TYPE.BATTLE", "line_number": 116, "usage_type": "attribute"}, {"api_name": "messenger.m_constants.MESSENGER_COMMAND_TYPE", "line_number": 116, "usage_type": "name"}, {"api_name": "debug_utils.LOG_ERROR", "line_number": 135, "usage_type": "call"}, {"api_name": "debug_utils.LOG_ERROR", "line_number": 140, "usage_type": "call"}, {"api_name": "gui.battle_control.arena_info.arena_vos.EPIC_BATTLE_KEYS.PHYSICAL_SECTOR", "line_number": 145, "usage_type": "attribute"}, {"api_name": "gui.battle_control.arena_info.arena_vos.EPIC_BATTLE_KEYS", "line_number": 145, "usage_type": "name"}, {"api_name": "gui.battle_control.arena_info.arena_vos.EPIC_BATTLE_KEYS.PLAYER_GROUP", "line_number": 146, "usage_type": "attribute"}, {"api_name": "gui.battle_control.arena_info.arena_vos.EPIC_BATTLE_KEYS", "line_number": 146, "usage_type": "name"}, {"api_name": "helpers.i18n.makeString", "line_number": 153, "usage_type": "call"}, {"api_name": "helpers.i18n", "line_number": 153, "usage_type": "name"}, {"api_name": "gui.Scaleform.locale.EPIC_BATTLE.EPIC_BATTLE.ZONE_ZONE_TEXT", "line_number": 153, "usage_type": "attribute"}, {"api_name": "gui.Scaleform.locale.EPIC_BATTLE.EPIC_BATTLE", "line_number": 153, "usage_type": "name"}, {"api_name": "arena_component_system.sector_base_arena_component.ID_TO_BASENAME", "line_number": 153, "usage_type": "name"}, {"api_name": "helpers.i18n.makeString", "line_number": 155, "usage_type": "call"}, {"api_name": "helpers.i18n", "line_number": 155, "usage_type": "name"}, {"api_name": "gui.Scaleform.locale.EPIC_BATTLE.EPIC_BATTLE.ZONE_HEADQUARTERS_TEXT", "line_number": 155, "usage_type": "attribute"}, {"api_name": "gui.Scaleform.locale.EPIC_BATTLE.EPIC_BATTLE", "line_number": 155, "usage_type": "name"}, {"api_name": "messenger.ext.player_helpers.isCurrentPlayer", "line_number": 160, "usage_type": "call"}, {"api_name": "messenger.m_constants.MESSENGER_COMMAND_TYPE.BATTLE", "line_number": 170, "usage_type": "attribute"}, {"api_name": "messenger.m_constants.MESSENGER_COMMAND_TYPE", "line_number": 170, "usage_type": "name"}, {"api_name": "messenger.formatters.chat_message.CommonMessageBuilder", "line_number": 183, "usage_type": "call"}, {"api_name": "messenger.formatters.chat_message", "line_number": 183, "usage_type": "name"}, {"api_name": "messenger.ext.isBattleChatEnabled", "line_number": 186, "usage_type": "call"}, {"api_name": "messenger.formatters.chat_message.SquadMessageBuilder", "line_number": 196, "usage_type": "call"}, {"api_name": "messenger.formatters.chat_message", "line_number": 196, "usage_type": "name"}, {"api_name": "messenger.formatters.users_messages.getBroadcastIsInCoolDownMessage", "line_number": 210, "usage_type": "call"}, {"api_name": "messenger_common_chat2.MESSENGER_LIMITS.BROADCASTS_FROM_CLIENT_COOLDOWN_SEC", "line_number": 210, "usage_type": "attribute"}, {"api_name": "messenger_common_chat2.MESSENGER_LIMITS", "line_number": 210, "usage_type": "name"}]} +{"seq_id": "72825374493", "text": "import random\r\nfrom PIL import Image, ImageFilter\r\nimport os\r\n\r\n\r\ndef convert_to_yolo_format(paste_position, image, composite):\r\n x, y = paste_position\r\n potx, poty = x+image.width/2, y+image.height/2\r\n yolox, yoloy, yolow, yoloh = potx/composite.width, poty/composite.height, image.width/composite.width, image.height/composite.height\r\n return yolox, yoloy, yolow, yoloh\r\n\r\n\r\ndef check_intersection(position, image, paste_positions):\r\n\r\n x1, y1 = position\r\n width1, height1 = image.size\r\n\r\n for 
paste_position, paste_size in paste_positions:\r\n x2, y2 = paste_position\r\n width2, height2 = paste_size\r\n\r\n if x1 < x2 + width2 and x1 + width1 > x2 and y1 < y2 + height2 and y1 + height1 > y2:\r\n return True\r\n return False\r\n\r\n\r\ndef cutpaste(template, image_filenames, num_patchs):\r\n paste_positions = []\r\n composite = template\r\n label_dict = {'A_offset': 0, 'B_missing_part': 1, 'C_libei': 2}\r\n num_patchs = random.randint(6, 10)\r\n selected_images = random.sample(image_filenames, num_patchs)\r\n paste_position = None\r\n yolo_txt = []\r\n for cutout in selected_images:\r\n patch = Image.open(cutout)\r\n patch = patch.filter(ImageFilter.MedianFilter(size=3))\r\n patch = patch.resize((patch.size[0]//2, patch.size[1]//2))\r\n\r\n while paste_position is None or check_intersection(paste_position, patch, paste_positions):\r\n # print(paste_position)\r\n paste_position = (random.randint(0, composite.width - patch.width), random.randint(0, composite.height - patch.height))\r\n \r\n yolox, yoloy, yolow, yoloh = convert_to_yolo_format(paste_position, patch, composite)\r\n label = label_dict[cutout.split('/')[-2]]\r\n yolo_line = label, yolox, yoloy, yolow, yoloh\r\n yolo_txt.append(yolo_line)\r\n composite.paste(patch, paste_position)\r\n paste_positions.append((paste_position, patch.size))\r\n \r\n normalcutouts = []\r\n for Z_normal in os.listdir('Z_normal'):\r\n Z_normal_path = os.path.join('Z_normal', Z_normal)\r\n normalcutouts.append(Z_normal_path)\r\n num_normal = random.randint(2, 5)\r\n print('num_patchs, num_normal: ', num_patchs, num_normal)\r\n normalcut_images = random.sample(normalcutouts, num_normal)\r\n for normalcut in normalcut_images:\r\n patch = Image.open(normalcut)\r\n patch = patch.filter(ImageFilter.MedianFilter(size=3))\r\n patch = patch.resize((patch.size[0]//2, patch.size[1]//2))\r\n\r\n while paste_position is None or check_intersection(paste_position, patch, paste_positions):\r\n paste_position = (random.randint(0, composite.width - patch.width), random.randint(0, composite.height - patch.height))\r\n composite.paste(patch, paste_position)\r\n paste_positions.append((paste_position, patch.size))\r\n \r\n return composite, yolo_txt\r\n\r\ndef main(template, patch_path):\r\n template = Image.open(template)\r\n\r\n path = patch_path\r\n image_filenames = []\r\n for subdir in os.listdir(path):\r\n subdir_path = os.path.join(path, subdir)\r\n for file in os.listdir(subdir_path):\r\n file_path = os.path.join(subdir_path, file)\r\n image_filenames.append(file_path)\r\n\r\n composite, yolo_txt = cutpaste(template, image_filenames, 8)\r\n # composite.save('result1.jpg')\r\n return composite, yolo_txt\r\n\r\nif __name__ == '__main__':\r\n template = 'template.jpg'\r\n patch_path = 'lianbao3f'\r\n for iter in range(10000):\r\n composite, yolo_txt = main(template, patch_path)\r\n composite.save(f'generation/res{iter}.jpg')\r\n\r\n with open(f'glabel/res{iter}.txt', 'w')as f:\r\n for i in range(len(yolo_txt)):\r\n line = f\"{yolo_txt[i][0]} {yolo_txt[i][1]} {yolo_txt[i][2]} {yolo_txt[i][3]} {yolo_txt[i][4]}\\n\"\r\n f.write(line)\r\n\r\n", "repo_name": "JnanaG/myCutpaste", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3864, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "random.randint", "line_number": 31, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 32, "usage_type": "call"}, {"api_name": "PIL.Image.open", 
"line_number": 36, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 36, "usage_type": "name"}, {"api_name": "PIL.ImageFilter.MedianFilter", "line_number": 37, "usage_type": "call"}, {"api_name": "PIL.ImageFilter", "line_number": 37, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 42, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 55, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 57, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 59, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 59, "usage_type": "name"}, {"api_name": "PIL.ImageFilter.MedianFilter", "line_number": 60, "usage_type": "call"}, {"api_name": "PIL.ImageFilter", "line_number": 60, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 64, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 71, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 71, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}]} +{"seq_id": "26974626763", "text": "#this is the solution of Question 787 on Leetcode\n\n\n#This is like a shortest path problem but we have a limitation on number of stops when traveling.\n#for shortest path, we can just use Djisktra algorithm but here we'll change it slightly.\n\nimport heapq as heap\nfrom collections import defaultdict\nclass Solution:\n def findCheapestPrice(self, n: int, flights: List[List[int]], src: int, dst: int, k: int) -> int:\n #building the graph\n G = self.build(flights)\n \n #heap will be as: [(total_price_sofar , number_of_stops_sofar , node_number)]\n #heap will give the array with minimum first element and that means we'll always pop minimum total_price_sofar in our search\n cost_heap = [(0,0,src)]\n heap.heapify(cost_heap)\n #we create a seen hashmap and as key : value => node : minimum_stops_for_this_node\n #if we can reach a node in 3 steps and with 40$ we don't need to consider the option\n #where we reach it in 6 steps and 60$. First one is already better and heap will give us the minimum prices always.\n seen = {}\n #we'll do a breadth first search as long as something remains in heap\n while cost_heap:\n cost_sum, k1, node = heap.heappop(cost_heap)\n #if we reach node, thanks to heap this is the lowest we can get and also number of stops <= given k. That proves it is true\n if node == dst: return cost_sum\n #as explained above, if there is a lower stops with lower cost, we don't need to consider higher cost with higher stops.\n if node in seen and seen[node] <= k1: continue\n seen[node] = k1\n #we can push k+1 to heap because if k+1 travel stop == destination, this is still acceptable. 
+{"seq_id": "8399461145", "text": "from webob import exc\nimport datetime\nimport re\nimport math\n\nfrom web import EmulateWeb\nfrom ..model import PostBlog, session, Content, Dig, Tags, BlogTag\nfrom .user import authenticate\nfrom ..util import jsonify\n\npost_router = EmulateWeb.Router('/post')\n\n\n@post_router.post('/')\n@authenticate\ndef pub(ctx, request:EmulateWeb.Request):\n payload = request.json\n\n try:\n author_id = request.user.id\n title = payload.get('title')\n text = payload.get('content')\n tags = re.split('[\\s,]+',payload.get('tag', ''))\n except:\n raise exc.HTTPBadRequest()\n content = Content()\n content.content = text\n post = PostBlog()\n post.authorid = author_id\n post.title = title\n post.content = content\n post.postdate = datetime.datetime.now()\n\n for tag in tags:\n t = session.query(Tags).filter(Tags.tag == tag).first()\n if not t:\n t = Tags()\n t.tag = tag\n session.add(t)\n\n blog_tag = BlogTag()\n blog_tag.blog = post\n blog_tag.tag = t\n session.add(blog_tag)\n\n session.add(post)\n\n try:\n session.commit()\n return jsonify(post_id=post.id)\n except:\n session.rollback()\n raise exc.HTTPInternalServerError\n\n@post_router.get('/{id:int}')\ndef get(ctx, request:EmulateWeb.Request):\n '''request specific blog'''\n blog_id = request.vars.id\n try:\n blog = session.query(PostBlog).filter(PostBlog.id == blog_id).one()\n print(blog.hits, type(blog.hits))\n\n blog.hits += 1\n session.add(blog)\n try:\n session.commit()\n except:\n session.rollback()\n\n #dig_info bury_info\n dig_info, bury_info = get_dig_or_bury(blog_id)\n\n #tags\n tags = session.query(BlogTag).filter(BlogTag.blog_id == blog_id).limit(10).all()\n if tags:\n tags_info = [{'tag_id': tag.tag_id, 'tag': tag.tag.tag} for tag in tags]\n else:\n tags_info = []\n\n return jsonify(blog={\n 'blog_id':blog.id,\n 'title':blog.title,\n 'author':blog.author.name,\n 'post_date':blog.postdate.timestamp(),\n 'content':blog.content.content\n }, dig_info = dig_info, bury_info = bury_info, tags_info=tags_info)\n except Exception as e:\n print(e)\n raise exc.HTTPNotFound()\n\n\ndef get_dig_or_bury(blog_id):\n dig_query = session.query(Dig).filter(Dig.blog_id == blog_id)\n dig_count = dig_query.filter(Dig.state == 1).count()\n dig_list = dig_query.filter(Dig.state == 1).order_by(Dig.putdate.desc()).limit(10).all()\n dig_info = {'count': dig_count, 'users': [{'user_id': dig.user_id, 'name': dig.user.name} for dig in dig_list]}\n bury_count = dig_query.filter(Dig.state == 0).count()\n bury_list = dig_query.filter(Dig.state == 0).order_by(Dig.putdate.desc()).limit(10).all()\n bury_info = {'count': bury_count,\n 'users': [{'user_id': bury.user_id, 'name': 
bury.user.name} for bury in bury_list]}\n\n return dig_info, bury_info\n\n\ndef getparam(d:dict, name:str, param_type, default, func):\n result = param_type(d.get(name, default))\n result = func(result, default)\n return result\n\n@post_router.get('/')\n@post_router.get('/user/{id:int}')\n@post_router.get('/tag/{tag:str}')\ndef list(ctx, request:EmulateWeb.Request):\n '''query blogs in database, user can set the page and the number of blogs in each page'''\n\n # try:\n # page = int(request.params.get('page', 1))\n # page = page if page > 0 else 1\n # except:\n # page = 1\n page = getparam(request.params, 'page', int, 1, lambda x,y:x if x>0 else y)\n size = getparam(request.params, 'size', int, 10, lambda x,y:x if x>0 and x<31 else y)\n\n try:\n count = session.query(PostBlog).count()\n blogs = session.query(PostBlog).order_by(PostBlog.id.desc()).limit(size).offset(size*(page-1)).all()\n return jsonify(blogs=[{\n 'blog_id':blog.id,\n 'title':blog.title,\n } for blog in blogs],\n page_info={\n 'page':page,\n 'size':size,\n 'count':count,\n 'pages':math.ceil(count / size)\n })\n except Exception as e:\n print(e)\n raise exc.HTTPInternalServerError()\n\ndef dig_or_bury(userid, blogid, state):\n '''set the state dig or bury on one blog'''\n dig = Dig()\n dig.user_id = userid\n dig.blog_id = blogid\n dig.state = state\n\n session.add(dig)\n try:\n session.commit()\n return jsonify()\n except:\n session.rollback()\n return jsonify(status=500)\n\n@post_router.put('/dig/{id:int}')\n@authenticate\ndef dig(ctx, request:EmulateWeb.Request):\n '''put the state dig to one blog'''\n return dig_or_bury(request.user.id, request.vars.id, 1)\n\n@post_router.put('/bury/{id:int}')\n@authenticate\ndef bury(ctx, request:EmulateWeb.Request):\n '''put the state bury to one blog'''\n return dig_or_bury(request.user.id, request.vars.id, 0)\n", "repo_name": "simonebrave/BlogSimulationBaseOnReactNative", "sub_path": "BlogSimulation/blog/blog/handler/post.py", "file_name": "post.py", "file_ext": "py", "file_size_in_byte": 5003, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "web.EmulateWeb.Router", "line_number": 11, "usage_type": "call"}, {"api_name": "web.EmulateWeb", "line_number": 11, "usage_type": "name"}, {"api_name": "web.EmulateWeb.Request", "line_number": 16, "usage_type": "attribute"}, {"api_name": "web.EmulateWeb", "line_number": 16, "usage_type": "name"}, {"api_name": "re.split", "line_number": 23, "usage_type": "call"}, {"api_name": "webob.exc.HTTPBadRequest", "line_number": 25, "usage_type": "call"}, {"api_name": "webob.exc", "line_number": 25, "usage_type": "name"}, {"api_name": "model.Content", "line_number": 26, "usage_type": "call"}, {"api_name": "model.PostBlog", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 32, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 32, "usage_type": "attribute"}, {"api_name": "model.session.query", "line_number": 35, "usage_type": "call"}, {"api_name": "model.Tags", "line_number": 35, "usage_type": "argument"}, {"api_name": "model.session", "line_number": 35, "usage_type": "name"}, {"api_name": "model.Tags.tag", "line_number": 35, "usage_type": "attribute"}, {"api_name": "model.Tags", "line_number": 37, "usage_type": "call"}, {"api_name": "model.session.add", "line_number": 39, "usage_type": "call"}, {"api_name": "model.session", "line_number": 39, "usage_type": "name"}, {"api_name": "model.BlogTag", "line_number": 41, 
"usage_type": "call"}, {"api_name": "model.session.add", "line_number": 44, "usage_type": "call"}, {"api_name": "model.session", "line_number": 44, "usage_type": "name"}, {"api_name": "model.session.add", "line_number": 46, "usage_type": "call"}, {"api_name": "model.session", "line_number": 46, "usage_type": "name"}, {"api_name": "model.session.commit", "line_number": 49, "usage_type": "call"}, {"api_name": "model.session", "line_number": 49, "usage_type": "name"}, {"api_name": "util.jsonify", "line_number": 50, "usage_type": "call"}, {"api_name": "model.session.rollback", "line_number": 52, "usage_type": "call"}, {"api_name": "model.session", "line_number": 52, "usage_type": "name"}, {"api_name": "webob.exc.HTTPInternalServerError", "line_number": 53, "usage_type": "attribute"}, {"api_name": "webob.exc", "line_number": 53, "usage_type": "name"}, {"api_name": "user.authenticate", "line_number": 15, "usage_type": "name"}, {"api_name": "web.EmulateWeb.Request", "line_number": 56, "usage_type": "attribute"}, {"api_name": "web.EmulateWeb", "line_number": 56, "usage_type": "name"}, {"api_name": "model.session.query", "line_number": 60, "usage_type": "call"}, {"api_name": "model.PostBlog", "line_number": 60, "usage_type": "argument"}, {"api_name": "model.session", "line_number": 60, "usage_type": "name"}, {"api_name": "model.PostBlog.id", "line_number": 60, "usage_type": "attribute"}, {"api_name": "model.session.add", "line_number": 64, "usage_type": "call"}, {"api_name": "model.session", "line_number": 64, "usage_type": "name"}, {"api_name": "model.session.commit", "line_number": 66, "usage_type": "call"}, {"api_name": "model.session", "line_number": 66, "usage_type": "name"}, {"api_name": "model.session.rollback", "line_number": 68, "usage_type": "call"}, {"api_name": "model.session", "line_number": 68, "usage_type": "name"}, {"api_name": "model.session.query", "line_number": 74, "usage_type": "call"}, {"api_name": "model.BlogTag", "line_number": 74, "usage_type": "argument"}, {"api_name": "model.session", "line_number": 74, "usage_type": "name"}, {"api_name": "model.BlogTag.blog_id", "line_number": 74, "usage_type": "attribute"}, {"api_name": "util.jsonify", "line_number": 80, "usage_type": "call"}, {"api_name": "webob.exc.HTTPNotFound", "line_number": 89, "usage_type": "call"}, {"api_name": "webob.exc", "line_number": 89, "usage_type": "name"}, {"api_name": "model.session.query", "line_number": 93, "usage_type": "call"}, {"api_name": "model.Dig", "line_number": 93, "usage_type": "argument"}, {"api_name": "model.session", "line_number": 93, "usage_type": "name"}, {"api_name": "model.Dig.blog_id", "line_number": 93, "usage_type": "attribute"}, {"api_name": "model.Dig.state", "line_number": 94, "usage_type": "attribute"}, {"api_name": "model.Dig", "line_number": 94, "usage_type": "name"}, {"api_name": "model.Dig.state", "line_number": 95, "usage_type": "attribute"}, {"api_name": "model.Dig", "line_number": 95, "usage_type": "name"}, {"api_name": "model.Dig.putdate.desc", "line_number": 95, "usage_type": "call"}, {"api_name": "model.Dig.putdate", "line_number": 95, "usage_type": "attribute"}, {"api_name": "model.Dig.state", "line_number": 97, "usage_type": "attribute"}, {"api_name": "model.Dig", "line_number": 97, "usage_type": "name"}, {"api_name": "model.Dig.state", "line_number": 98, "usage_type": "attribute"}, {"api_name": "model.Dig", "line_number": 98, "usage_type": "name"}, {"api_name": "model.Dig.putdate.desc", "line_number": 98, "usage_type": "call"}, {"api_name": "model.Dig.putdate", 
"line_number": 98, "usage_type": "attribute"}, {"api_name": "web.EmulateWeb.Request", "line_number": 113, "usage_type": "attribute"}, {"api_name": "web.EmulateWeb", "line_number": 113, "usage_type": "name"}, {"api_name": "model.session.query", "line_number": 125, "usage_type": "call"}, {"api_name": "model.PostBlog", "line_number": 125, "usage_type": "argument"}, {"api_name": "model.session", "line_number": 125, "usage_type": "name"}, {"api_name": "model.session.query", "line_number": 126, "usage_type": "call"}, {"api_name": "model.PostBlog", "line_number": 126, "usage_type": "argument"}, {"api_name": "model.session", "line_number": 126, "usage_type": "name"}, {"api_name": "model.PostBlog.id.desc", "line_number": 126, "usage_type": "call"}, {"api_name": "model.PostBlog.id", "line_number": 126, "usage_type": "attribute"}, {"api_name": "util.jsonify", "line_number": 127, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 135, "usage_type": "call"}, {"api_name": "webob.exc.HTTPInternalServerError", "line_number": 139, "usage_type": "call"}, {"api_name": "webob.exc", "line_number": 139, "usage_type": "name"}, {"api_name": "model.Dig", "line_number": 143, "usage_type": "call"}, {"api_name": "model.session.add", "line_number": 148, "usage_type": "call"}, {"api_name": "model.session", "line_number": 148, "usage_type": "name"}, {"api_name": "model.session.commit", "line_number": 150, "usage_type": "call"}, {"api_name": "model.session", "line_number": 150, "usage_type": "name"}, {"api_name": "util.jsonify", "line_number": 151, "usage_type": "call"}, {"api_name": "model.session.rollback", "line_number": 153, "usage_type": "call"}, {"api_name": "model.session", "line_number": 153, "usage_type": "name"}, {"api_name": "util.jsonify", "line_number": 154, "usage_type": "call"}, {"api_name": "web.EmulateWeb.Request", "line_number": 158, "usage_type": "attribute"}, {"api_name": "web.EmulateWeb", "line_number": 158, "usage_type": "name"}, {"api_name": "user.authenticate", "line_number": 157, "usage_type": "name"}, {"api_name": "web.EmulateWeb.Request", "line_number": 164, "usage_type": "attribute"}, {"api_name": "web.EmulateWeb", "line_number": 164, "usage_type": "name"}, {"api_name": "user.authenticate", "line_number": 163, "usage_type": "name"}]} +{"seq_id": "34018437315", "text": "#!/usr/bin/env python\n\nimport cv2\nfrom IPython.display import HTML, display\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_hub as hub\nimport os\n\n# Custom func for cropping\nfrom cropping import *\n\n# script with functions used in model inference.py\n\n\ndef draw(frame, keypoints, EDGE_COLORS, size, threshold=0.11):\n \"\"\"\n Draws the keypoints and edges\n \"\"\"\n # Draw the keypoints and get the denormalized coordinates\n denormalized_coordinates = draw_keypoints(frame, keypoints, size, threshold)\n # Draw the edges\n draw_edges(denormalized_coordinates, frame, EDGE_COLORS, threshold)\n\n\ndef draw_keypoints(frame, keypoints, size, threshold=0.11):\n \"\"\"Draws the keypoints on a image frame\"\"\"\n\n # Denormalize the coordinates : multiply the normalized coordinates by the input_size(width,height)\n denormalized_coordinates = np.squeeze(np.multiply(keypoints, [size[0], size[1], 1]))\n # Iterate through the points\n for keypoint in denormalized_coordinates:\n # Unpack the keypoint values : y, x, confidence score\n keypoint_y, keypoint_x, keypoint_confidence = keypoint\n if keypoint_confidence > threshold:\n \"\"\" \"\n Draw the circle\n Note : A thickness of -1 px will fill the circle 
shape by the specified color.\n \"\"\"\n cv2.circle(\n img=frame,\n center=(int(keypoint_x), int(keypoint_y)),\n radius=4,\n color=(255, 0, 0),\n thickness=-1,\n )\n return denormalized_coordinates\n\n\ndef draw_edges(denormalized_coordinates, frame, edges_colors, threshold=0.11):\n \"\"\"\n Draws the edges on a image frame\n \"\"\"\n\n # Iterate through the edges\n for edge, color in edges_colors.items():\n # Get the dict value associated to the actual edge\n p1, p2 = edge\n # Get the points\n y1, x1, confidence_1 = denormalized_coordinates[p1]\n y2, x2, confidence_2 = denormalized_coordinates[p2]\n # Draw the line from point 1 to point 2, the confidence > threshold\n if (confidence_1 > threshold) & (confidence_2 > threshold):\n cv2.line(\n img=frame,\n pt1=(int(x1), int(y1)),\n pt2=(int(x2), int(y2)),\n color=color,\n thickness=2,\n lineType=cv2.LINE_AA, # Gives anti-aliased (smoothed) line which looks great for curves\n )\n\n\ndef load_video(input_video_path):\n \"\"\"\n Loads the video and return its details\n \"\"\"\n\n # Load the video\n video = cv2.VideoCapture(input_video_path)\n # Get the frame count\n frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))\n # Display parameter\n print(f\"Frame count: {frame_count}\")\n\n # Get the initial shape (width, height)\n initial_shape = []\n initial_shape.append(int(video.get(cv2.CAP_PROP_FRAME_WIDTH)))\n initial_shape.append(int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n\n # return video, frame_count, output_frames, initial_shape\n return video, frame_count, initial_shape\n\n\ndef run_inference(\n input_video_path,\n out_video_path,\n model_func,\n EDGE_COLORS,\n use_cropping=True,\n FPS=20,\n INFERENCE_SIZE=(256, 256),\n):\n \"\"\"\n Runs inferences then starts the main loop for each frame\n \"\"\"\n\n # Load the video\n video, frame_count, initial_shape = load_video(input_video_path)\n\n # for cropping:\n if use_cropping:\n crop_region = init_crop_region(INFERENCE_SIZE[0], INFERENCE_SIZE[1])\n\n # Create output video writer\n fourcc = cv2.VideoWriter_fourcc(*\"mp4v\")\n video_writer = cv2.VideoWriter(\n out_video_path, fourcc, float(FPS), (initial_shape[0], initial_shape[1])\n )\n # Create keypoints result dict for storing keypoints from each frame\n keypoints_dict = {}\n\n # Create frame counter for storing keypoints to dict\n frame_counter = -1\n # Loop while the video is opened\n while video.isOpened():\n\n frame_counter += 1\n\n # Capture the frame\n ret, frame = video.read()\n\n # Exit if the frame is empty\n if frame is None:\n break\n\n # Retrieve the frame index\n current_index = video.get(cv2.CAP_PROP_POS_FRAMES)\n\n # Copy the frame\n image = frame.copy()\n image = cv2.resize(image, INFERENCE_SIZE)\n # Resize to the target shape and cast to an int32 vector\n input_image = tf.cast(\n tf.image.resize_with_pad(image, INFERENCE_SIZE[0], INFERENCE_SIZE[1]),\n dtype=tf.int32,\n )\n\n # inference variant without cropping\n if not use_cropping:\n # Create a batch (input tensor)\n input_image = tf.expand_dims(input_image, axis=0)\n\n # Perform inference\n results = model_func(input_image)\n\n # initial shape of res is (1,1,17,3)\n keypoints = results[\"output_0\"][0][0]\n\n # inference variant with cropping\n else:\n crop_size = INFERENCE_SIZE\n keypoints = inference_with_cropping(\n model_func, input_image, crop_region, crop_size\n )\n keypoints = keypoints[0][0]\n\n # Draw the results to frame\n draw(image, keypoints, EDGE_COLORS, INFERENCE_SIZE, threshold=0.11)\n\n # Denormalizing resulted keypoints to initial scale and crop 3 column 
with scores\n keypoints = np.multiply(keypoints[:, 0:2], [initial_shape[0], initial_shape[1]])\n # Adding to resulted dict\n keypoints_dict[\"pose_\" + str(frame_counter)] = keypoints.tolist()\n\n # Get the output frame : reshape to the original size\n frame_rgb = cv2.cvtColor(\n cv2.resize(\n image,\n (initial_shape[0], initial_shape[1]),\n interpolation=cv2.INTER_LANCZOS4,\n ),\n cv2.COLOR_BGR2RGB, # OpenCV processes BGR images instead of RGB\n )\n\n # Add resulted frame to the video_writer\n video_writer.write(frame_rgb)\n\n # Release the object\n video_writer.release()\n\n print(\"Completed !\")\n\n return keypoints_dict\n", "repo_name": "Barabaika/Pose_estimation_tool", "sub_path": "main/inference_funcs.py", "file_name": "inference_funcs.py", "file_ext": "py", "file_size_in_byte": 6135, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "numpy.squeeze", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 64, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 70, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 80, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FRAME_COUNT", "line_number": 82, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_WIDTH", "line_number": 88, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 89, "usage_type": "attribute"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 116, "usage_type": "call"}, {"api_name": "cv2.VideoWriter", "line_number": 117, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_POS_FRAMES", "line_number": 138, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 142, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 144, "usage_type": "call"}, {"api_name": "tensorflow.image.resize_with_pad", "line_number": 145, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 145, "usage_type": "attribute"}, {"api_name": "tensorflow.int32", "line_number": 146, "usage_type": "attribute"}, {"api_name": "tensorflow.expand_dims", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 172, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 177, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 178, "usage_type": "call"}, {"api_name": "cv2.INTER_LANCZOS4", "line_number": 181, "usage_type": "attribute"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 183, "usage_type": "attribute"}]} +{"seq_id": "31803967882", "text": "import sys\n\nfrom abc import ABC, abstractmethod\n\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QApplication, QWidget, QMainWindow, QAction\nfrom PyQt5 import QtCore, QtGui\nfrom PyQt5.QtGui import QColor, QPainter, QPen, QFontMetrics\nimport pyaudio\nimport struct\n\nfrom midi.score_to_vst_midi_converter import MidiMessage, NoteMessage, MetaMessage, ExpressionVelocityMessage\n\nfrom ctypes import CDLL, Structure, c_int, c_char_p, c_int32, py_object\nimport os\n\nLIBRARY = 'lib/libvst23host'\n\n\nclass PyEvent(Structure):\n _fields_ = [('msg_type', c_int32),\n ('channel', c_int32),\n ('data1', c_int32),\n ('data2', c_int32),\n ('rel_frame_time', c_int32),\n ('abs_frame_time', c_int32)\n ]\n\n\nCHUNK = 1024\nSAMPLE_RATE = 44100 # samples per second\n\n\nclass 
VstAppUserInterface(ABC):\n @abstractmethod\n def get_library_name(self):\n pass\n\n @abstractmethod\n def get_vst_midi_event_list(self):\n pass\n\n @abstractmethod\n def get_time_in_ms(self):\n pass\n\n @abstractmethod\n def save_generated_buffers(self, left_buffer, right_buffer):\n pass\n\n @abstractmethod\n def get_audio_buffers(self):\n pass\n\n @abstractmethod\n def get_save_preset_filename(self):\n pass\n\n @abstractmethod\n def get_load_preset_filename(self):\n pass\n\n\nclass VstInterfaceApp(QMainWindow):\n def __init__(self, vst_app_user_interface, alt_lib_path):\n QMainWindow.__init__(self)\n self.vst_app_user_interface = vst_app_user_interface\n self.title = 'VST/QT5 player interface.'\n self.help_menu = None\n self.exit_act = None\n self.vst_library = None\n self.error = None\n self.vst_lib_path_name = None\n self.is_vst2 = False\n\n self.lib_path = alt_lib_path\n if self.lib_path is None:\n path = os.path.abspath(sys.modules[VstInterfaceApp.__module__].__file__)\n end_index = path.rindex('/')\n self.lib_path = path[0: end_index + 1] + LIBRARY\n\n try:\n self.vst_library = CDLL(os.path.abspath(self.lib_path), mode=1)\n except Exception as e:\n self.error = 'Could not load library={0}: {1}'.format(self.lib_path, e)\n print(self.error, file=sys.stderr, flush=True)\n return\n\n self.init_ui()\n\n def init_ui(self):\n self.create_actions()\n self.create_menus()\n\n self.setWindowTitle(self.title)\n self.setGeometry(50, 20, 640, 480)\n self.statusBar().showMessage('Message in Status Bar.')\n\n widget = DrawingWidget()\n self.setCentralWidget(widget)\n\n def error(self):\n return self.error\n\n def create_actions(self):\n self.load_library_act = QAction(\"Load &Library\", self, shortcut='Ctrl+L', statusTip='Load Vst Library',\n triggered=self.load_library)\n self.load_instruments_act = QAction(\"Load &Instruments\", self, shortcut='Ctrl+I', statusTip='Load Instruments',\n triggered=self.load_instruments)\n self.load_preset = QAction(\"L&oad Preset\", self, shortcut=\"Ctrl+o\", statusTip=\"Load Preset\",\n triggered=self.load_preset)\n self.save_preset = QAction(\"&Save Preset\", self, shortcut=\"Ctrl+S\", statusTip=\"Save Preset\",\n triggered=self.save_preset)\n self.generate_audio_act = QAction(\"&Generate Audio\", self, shortcut='Ctrl+G', statusTip='Generate Audio',\n triggered=self.generate_audio)\n self.play_audio_act = QAction(\"&Play Audio\", self, shortcut='Ctrl+P', statusTip='Play Audio',\n triggered=self.play)\n self.exit_act = QAction(\"E&xit\", self, shortcut='Ctrl+Q', statusTip='Exit application',\n triggered=self.close)\n\n def create_menus(self):\n self.menuBar().setNativeMenuBar(False)\n primary_actions = self. 
menuBar().addMenu('&Actions')\n primary_actions.addAction(self.load_library_act)\n primary_actions.addAction(self.load_instruments_act)\n primary_actions.addAction(self.load_preset)\n primary_actions.addAction(self.save_preset)\n primary_actions.addAction(self.generate_audio_act)\n primary_actions.addAction(self.play_audio_act)\n\n self.help_menu = self.menuBar().addMenu(\"&Help\")\n primary_actions.addAction(self.exit_act)\n\n def load_library(self):\n self.vst_lib_path_name = self.vst_app_user_interface.get_library_name()\n print(self.vst_lib_path_name)\n if self.vst_lib_path_name[-1] == '3':\n self.vst_library.connect_vst3(self.vst_lib_path_name.encode('ascii'))\n else:\n self.vst_library.connect_vst2(self.vst_lib_path_name.encode('ascii'))\n self.is_vst2 = True\n\n # Important to set up to get return values properly from these called methods.\n self.vst_library.process_events.restype = py_object\n self.vst_library.process_events2.restype = py_object\n\n def load_instruments(self):\n if self.vst_library is None:\n self.statusBar().showMessage('Cannot load instrument before loading vst.')\n return\n\n if not self.is_vst2:\n self.vst_library.view_and_show()\n else:\n self.vst_library.view_and_show2()\n\n def load_preset(self):\n if not self.is_vst2:\n self.vst_library.load_preset(self.vst_app_user_interface.get_load_preset_filename().encode('ascii'))\n else:\n self.vst_library.load_bank(self.vst_app_user_interface.get_load_preset_filename().encode('ascii'))\n\n def save_preset(self):\n if not self.is_vst2:\n self.vst_library.save_preset(self.vst_app_user_interface.get_save_preset_filename().encode('ascii'))\n else:\n self.vst_library.save_bank(self.vst_app_user_interface.get_save_preset_filename().encode('ascii'))\n\n def feed_events(self, midi_message_list):\n midi_message_array = VstInterfaceApp.convert_midi_message_list_to_py_event(midi_message_list)\n if not self.is_vst2:\n self.vst_library.feed_events(midi_message_array, len(midi_message_list))\n else:\n self.vst_library.feed_events2(midi_message_array, len(midi_message_list))\n return\n\n def generate_audio(self, play_time_in_ms):\n if self.is_vst2:\n self.vst_library.begin_event_rendering2()\n\n midi_message_list = self.vst_app_user_interface.get_vst_midi_event_list()\n if midi_message_list is not None:\n self.feed_events(midi_message_list)\n\n if not self.is_vst2:\n (left_buffer, right_buffer) = self.vst_library.process_events(\n int(self.vst_app_user_interface.get_time_in_ms()))\n else:\n (left_buffer, right_buffer) = self.vst_library.process_events2(\n int(self.vst_app_user_interface.get_time_in_ms()))\n self.vst_library.end_event_rendering2()\n\n self.vst_app_user_interface.save_generated_buffers(left_buffer, right_buffer)\n\n def play(self):\n print('playing ...')\n (left_audio_buffer, right_audio_buffer) = self.vst_app_user_interface.get_audio_buffers()\n self.num_samples = len(left_audio_buffer)\n\n p = pyaudio.PyAudio()\n\n stream = p.open(format=pyaudio.paFloat32,\n channels=2,\n rate=SAMPLE_RATE,\n output=True)\n\n sample_number = 0\n data_a = bytearray(CHUNK * 2 * 4)\n while sample_number < len(left_audio_buffer):\n num_samples_to_get = min(CHUNK, self.num_samples - sample_number)\n for i in range(0, num_samples_to_get):\n ba1 = bytearray(struct.pack(\"f\", left_audio_buffer[sample_number]))\n ba2 = bytearray(struct.pack(\"f\", right_audio_buffer[sample_number]))\n pos = 2 * 4 * i\n data_a[pos: pos + len(ba1)] = ba1\n data_a[pos + len(ba1): pos + len(ba1) + len(ba2)] = ba2\n sample_number += 1\n\n d = bytes(data_a)\n 
stream.write(d)\n\n print('finished playing')\n\n def disconnect(self):\n if not self.is_vst2:\n self.vst_library.close_vst()\n else:\n self.vst_library.close_vst2()\n self.vst_library = None\n\n def close(self):\n self.disconnect()\n super().close()\n\n @staticmethod\n def convert_midi_message_list_to_py_event(message_list):\n event_array = (PyEvent * len(message_list))()\n for i in range(0, len(message_list)):\n message = message_list[i]\n event_array[i].msg_type = message.msg_type\n event_array[i].channel = message.channel\n event_array[i].rel_frame_time = message.rel_frame_time\n event_array[i].abs_frame_time = message.abs_frame_time\n event_array[i].data1 = 0\n event_array[i].data2 = 0\n if isinstance(message, NoteMessage):\n event_array[i].data1 = message.note_value\n event_array[i].data2 = message.velocity\n elif isinstance(message, MetaMessage):\n event_array[i].data1 = message.value\n elif isinstance(message, ExpressionVelocityMessage):\n event_array[i].data1 = message.velocity\n return event_array\n\n\nclass DrawingWidget(QWidget):\n\n def __init__(self, parent=None):\n QWidget.__init__(self, parent)\n self.pen = QtGui.QPen(QColor(200, 0, 0))\n self.pen.setWidth(3)\n self.brush = QtGui.QBrush(QColor(0, 255, 255, 255))\n\n self.setAutoFillBackground(True)\n\n p = self.palette()\n p.setColor(self.backgroundRole(), Qt.green)\n self.setPalette(p)\n\n def paintEvent(self, event):\n # painter = QPainter(self)\n pass\n\n def mouse_pressed(self, event):\n # p = QtGui.QCursor.pos()\n return\n\n def mouse_moved(self, event):\n return\n\n def mouse_released(self, event):\n return\n\n\ndef vst_interface_launch(vst_app_user_interface, args=None, alt_lib_path=None):\n app = QApplication(args)\n window = VstInterfaceApp(vst_app_user_interface, alt_lib_path)\n if window.error is not None:\n sys.exit(1)\n window.show()\n sys.exit(app.exec())\n", "repo_name": "dpazel/music_rep", "sub_path": "vstinterface/vst_interface.py", "file_name": "vst_interface.py", "file_ext": "py", "file_size_in_byte": 10432, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "41", "api": [{"api_name": "ctypes.Structure", "line_number": 20, "usage_type": "name"}, {"api_name": "ctypes.c_int32", "line_number": 21, "usage_type": "name"}, {"api_name": "ctypes.c_int32", "line_number": 22, "usage_type": "name"}, {"api_name": "ctypes.c_int32", "line_number": 23, "usage_type": "name"}, {"api_name": "ctypes.c_int32", "line_number": 24, "usage_type": "name"}, {"api_name": "ctypes.c_int32", "line_number": 25, "usage_type": "name"}, {"api_name": "ctypes.c_int32", "line_number": 26, "usage_type": "name"}, {"api_name": "abc.ABC", "line_number": 34, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 35, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 39, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 43, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 47, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 51, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 55, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 59, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 64, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMainWindow.__init__", "line_number": 66, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 66, "usage_type": "name"}, {"api_name": 
"os.path.abspath", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "sys.modules", "line_number": 78, "usage_type": "attribute"}, {"api_name": "ctypes.CDLL", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 86, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 106, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 108, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 110, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 112, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 114, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 116, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QAction", "line_number": 118, "usage_type": "call"}, {"api_name": "ctypes.py_object", "line_number": 144, "usage_type": "name"}, {"api_name": "ctypes.py_object", "line_number": 145, "usage_type": "name"}, {"api_name": "pyaudio.PyAudio", "line_number": 200, "usage_type": "call"}, {"api_name": "pyaudio.paFloat32", "line_number": 202, "usage_type": "attribute"}, {"api_name": "struct.pack", "line_number": 212, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 213, "usage_type": "call"}, {"api_name": "midi.score_to_vst_midi_converter.NoteMessage", "line_number": 246, "usage_type": "argument"}, {"api_name": "midi.score_to_vst_midi_converter.MetaMessage", "line_number": 249, "usage_type": "argument"}, {"api_name": "midi.score_to_vst_midi_converter.ExpressionVelocityMessage", "line_number": 251, "usage_type": "argument"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 256, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget.__init__", "line_number": 259, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 259, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QPen", "line_number": 260, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 260, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QColor", "line_number": 260, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QBrush", "line_number": 262, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 262, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QColor", "line_number": 262, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.green", "line_number": 267, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 267, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 286, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 289, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 291, "usage_type": "call"}]} +{"seq_id": "2854252103", "text": "#cartPoleSubmission.py\n#contains my code for performing CartPole-v0\n\n#imports\n\nimport numpy as np\nimport gym\nimport sys\nimport random\nimport scipy.misc as spm\n\n#helpers\n\n#linear basis functions\n\ndef linearBasisFunc(stateVec,action):\n #helper for generating linear basis calculation\n #action is integer, stateVec is a vector\n stateMulVec = np.zeros(stateVec.shape[0] * 2)\n for i in range(stateVec.shape[0]):\n stateMulVec[(i * 2)] = stateVec[i] * action\n stateMulVec[(i * 2) + 1] = stateVec[i] * (1 - action)\n return stateMulVec\n\ndef 
firstInteractionBasisFunc(stateVec,action):\n    #helper for generating a linear basis along with a basis of interactions\n    numStates = stateVec.shape[0]\n    numLinearObs = spm.comb(numStates,1) * 2\n    numInterObs = spm.comb(numStates,2) * 2\n    stateMulVec = np.zeros((numLinearObs + numInterObs).astype(\"int\"))\n    intIndexLookup = {(0,1):0,(0,2):2,(0,3):4,(1,2):6,(1,3):8,(2,3):10}\n    #first get linear component\n    for i in range(numStates):\n        stateMulVec[(i * 2)] = stateVec[i] * action\n        stateMulVec[(i * 2) + 1] = stateVec[i] * (1 - action)\n    #then get interaction component\n    for i in range(numStates):\n        for j in range(i+1,numStates):\n            stateLookup = (numLinearObs + intIndexLookup[(i,j)]).astype(\"int\")\n            stateMulVec[stateLookup] = stateVec[i] * stateVec[j] * action\n            stateMulVec[stateLookup + 1] = (stateVec[i]*stateVec[j]*(1-action))\n    return stateMulVec\n\ndef firstIntWithPolynomials(stateVec,action):\n    #helper for generating a linear basis along with a basis of interactions and\n    #second degree polynomials\n    numStates = stateVec.shape[0]\n    numLinearObs = numPolyObs = spm.comb(numStates,1) * 2\n    numInterObs = spm.comb(numStates,2) * 2\n    stateMulVec = np.zeros((numLinearObs + numInterObs + numPolyObs).astype(\n        \"int\"))\n    intIndexLookup = {(0,1):0,(0,2):2,(0,3):4,(1,2):6,(1,3):8,(2,3):10}\n    #first get linear component\n    for i in range(numStates):\n        stateMulVec[(i * 2)] = stateVec[i] * action\n        stateMulVec[(i * 2) + 1] = stateVec[i] * (1 - action)\n    #then get interaction component\n    for i in range(numStates):\n        for j in range(i+1,numStates):\n            stateLookup = (numLinearObs + intIndexLookup[(i,j)]).astype(\"int\")\n            stateMulVec[stateLookup] = stateVec[i] * stateVec[j] * action\n            stateMulVec[stateLookup + 1] = (stateVec[i]*stateVec[j]*(1-action))\n    #then get second degree polynomials\n    stateStart = (numLinearObs + numInterObs).astype(\"int\")\n    for i in range(numStates):\n        stateMulVec[stateStart+(i * 2)] = (stateVec[i]**2) * action\n        stateMulVec[stateStart+(i * 2) + 1] = (stateVec[i]**2) * (1 - action)\n    return stateMulVec\n#classes\n\nclass LinearQFunc:\n    #helper that holds our q function, a linear approximator\n    def __init__(self,basisFunc):\n        #helper that prepares our q function\n        self.basisFunc = basisFunc\n\n    def q(self,stateVec,action,weightVec):\n        return np.dot(weightVec,self.basisFunc(stateVec,action))\n\n    def gradQ(self,stateVec,action,weightVec):\n        #since linear, it is just the basis function\n        return self.basisFunc(stateVec,action)\n\nclass Agent:\n    #defines the interactor with the environment\n    def __init__(self,actionSet,weightVec,alpha,gamma,epsilon,basisFunc):\n        self.actionSet = actionSet\n        self.weightVec = weightVec\n        self.alpha = alpha\n        self.gamma = gamma\n        self.epsilon = epsilon\n        self.qFunc = LinearQFunc(basisFunc)\n        #places to store states and actions\n        self.nextState = None\n        self.nextAction = None\n        self.reward = 0\n        self.prevState = None\n        self.prevAction = None\n\n    def chooseAction(self,state): #helper that makes an action decision based on\n        #a given state\n        randomNum = random.uniform(0,1)\n        if (randomNum > 1 - self.epsilon): #choose random policy\n            randomAction = random.sample(self.actionSet,1)[0]\n            return randomAction\n        else: #choose greedy policy\n            greedyAction = None\n            greedyActionVal = None\n            for action in self.actionSet:\n                tempActionVal = self.qFunc.q(state,action,self.weightVec)\n                if (type(greedyActionVal) == type(None)\n                    or greedyActionVal < tempActionVal): #found better action\n                    greedyActionVal = tempActionVal\n                    greedyAction = action\n            return greedyAction\n\n    def takeAction(self,nextState,nextReward,done): #helper that makes an action\n        #decision and an update after observing R and S'\n        self.nextState = nextState\n        self.reward = nextReward\n        if (done): #just need to do weight update\n            qVal = self.qFunc.q(self.prevState,self.prevAction,self.weightVec)\n            qGradVec = self.qFunc.gradQ(self.prevState,self.prevAction,\n                                        self.weightVec)\n            self.weightVec = (self.weightVec + self.alpha * (self.reward\n                                                             - qVal)\n                                             * qGradVec)\n            return None #no action taken\n        else: #need to choose next action and weight update\n            self.nextAction = self.chooseAction(self.nextState)\n            nextQVal = self.qFunc.q(self.nextState,self.nextAction,\n                                    self.weightVec)\n            prevQVal = self.qFunc.q(self.prevState,self.prevAction,\n                                    self.weightVec)\n            prevQVec = self.qFunc.gradQ(self.prevState,self.prevAction,\n                                        self.weightVec)\n            self.weightVec = (self.weightVec \n                + self.alpha * (self.reward + self.gamma * nextQVal - prevQVal) \n                * prevQVec)\n            #store our next and previous action\n            self.prevState = self.nextState\n            self.prevAction = self.nextAction\n            return self.nextAction\n\nclass AgentEnvironmentInteraction:\n    def __init__(self,gameName,alpha,gamma,epsilon,basisFunc,\n                 monitorFilename = None):\n        #helper for initializing our environment\n        self.env = gym.make(gameName)\n        if (type(monitorFilename) != type(None)):\n            self.env = gym.wrappers.Monitor(self.env,monitorFilename)\n        #choose initial weights\n        if basisFunc == linearBasisFunc:\n            initWeightVec = np.zeros(len(self.env.observation_space.high) * 2)\n        elif basisFunc == firstInteractionBasisFunc: #interaction one\n            observationSpaceSize = len(self.env.observation_space.high)\n            numLinearTerms = observationSpaceSize * 2\n            numInteractionTerms = spm.comb(observationSpaceSize,2) * 2\n            numTerms = (numLinearTerms + numInteractionTerms).astype(\"int\")\n            initWeightVec = np.zeros(numTerms)\n        else: #with polynomials\n            observationSpaceSize = len(self.env.observation_space.high)\n            numLinearTerms = numPolyTerms = observationSpaceSize * 2\n            numInteractionTerms = spm.comb(observationSpaceSize,2) * 2\n            numTerms = (numLinearTerms + numInteractionTerms \n                        + numPolyTerms).astype(\"int\")\n            initWeightVec = np.zeros(numTerms)\n        actionSet = set(range(self.env.action_space.n))\n        self.agent = Agent(actionSet,initWeightVec,alpha,gamma,epsilon,basisFunc\n                           )\n        #then some meta information\n        self.episodeLengthVec = []\n        self.episodeRewardVec = []\n\n    def performEpisode(self): #helper for performing a given episode\n        #start meta-parameters\n        episodeLength = 0\n        totalReward = 0\n        done = False #will alter this\n        #start initial state and action\n        initState = self.env.reset()\n        self.agent.prevState = initState\n        self.agent.prevAction = self.agent.chooseAction(self.agent.prevState)\n        while (not(done)): #run a step\n            episodeLength += 1\n            #self.env.render()\n            nextState, nextReward, done, _ = self.env.step(\n                self.agent.prevAction)\n            totalReward += nextReward\n            nextAction = self.agent.takeAction(nextState,nextReward,done)\n        #then update meta-parameters\n        self.episodeLengthVec.append(episodeLength)\n        self.episodeRewardVec.append(totalReward)\n\n    def performMultipleEpisodes(self,numEpisodes): #helper for performing\n        #multiple episodes\n        for episode in range(numEpisodes):\n            self.performEpisode()\n    \n#main process\n\nif __name__ == \"__main__\":\n    #apiKey = sys.argv[1]\n    #numEpisodes = sys.argv[2]\n    #tests\n    #stateVec = np.array([1,2,3,4])\n    #action = 1\n    #print firstIntWithPolynomials(stateVec,action)\n    #testQ = LinearQFunc(linearBasisFunc)\n    #weightVec = np.array([0,0,0,0,1,1,1,1])\n    
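# --- Editor's aside (not part of the original cartPoleSubmission.py): the
# takeAction method above is the semi-gradient SARSA update
# w <- w + alpha * (R + gamma * q(S', A') - q(S, A)) * grad q(S, A).
# A hedged, self-contained numpy sketch of that single update; sarsa_update
# is an illustrative name only.
import numpy as np

def sarsa_update(w, x_prev, x_next, reward, alpha, gamma, done=False):
    # For a linear approximator, q(s, a) = w . x(s, a) and grad q = x(s, a).
    target = reward if done else reward + gamma * np.dot(w, x_next)
    return w + alpha * (target - np.dot(w, x_prev)) * x_prev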
#print testQ.q(stateVec,action,weightVec)\n #print testQ.gradQ(stateVec,action,weightVec)\n epsilon = .001\n alpha = .5\n gamma = 1\n newInteraction = AgentEnvironmentInteraction(\"CartPole-v0\",alpha,gamma,\n epsilon,\n firstInteractionBasisFunc,\n \"../submission/cp-e-16\")\n newInteraction.performMultipleEpisodes(800)\n newInteraction.env.close()\n gym.upload(\"../submission/cp-e-16\",api_key = sys.argv[1])\n", "repo_name": "PLBMR/openAIGymSubmissions", "sub_path": "code/cartPoleSubmission.py", "file_name": "cartPoleSubmission.py", "file_ext": "py", "file_size_in_byte": 9605, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "numpy.zeros", "line_number": 19, "usage_type": "call"}, {"api_name": "scipy.misc.comb", "line_number": 28, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 28, "usage_type": "name"}, {"api_name": "scipy.misc.comb", "line_number": 29, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 29, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 30, "usage_type": "call"}, {"api_name": "scipy.misc.comb", "line_number": 48, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 48, "usage_type": "name"}, {"api_name": "scipy.misc.comb", "line_number": 49, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 49, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 78, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 102, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 104, "usage_type": "call"}, {"api_name": "gym.make", "line_number": 149, "usage_type": "call"}, {"api_name": "gym.wrappers.Monitor", "line_number": 151, "usage_type": "call"}, {"api_name": "gym.wrappers", "line_number": 151, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 154, "usage_type": "call"}, {"api_name": "scipy.misc.comb", "line_number": 158, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 158, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 160, "usage_type": "call"}, {"api_name": "scipy.misc.comb", "line_number": 164, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 164, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 167, "usage_type": "call"}, {"api_name": "gym.upload", "line_number": 222, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 222, "usage_type": "attribute"}]} +{"seq_id": "9471482796", "text": "import streamlit as st\nfrom pandas_profiling import ProfileReport\nfrom streamlit_pandas_profiling import st_profile_report\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport altair as alt\n\n\ndef make_correlation_plot(df: pd.DataFrame):\n st.subheader('Correlation Plot')\n fig, ax = plt.subplots(figsize=(10, 10))\n st.write(sns.heatmap(df.corr(), annot=True, linewidths=0.5))\n st.pyplot(fig)\n\n\ndef multi_line(data: pd.DataFrame, averages: pd.DataFrame):\n if data.shape[0] > 10:\n ticks = 10\n else:\n ticks = data.shape[0]\n deals = data.loc[data['Deal']]\n averages.reset_index(inplace=True)\n line_chart = alt.Chart(data).mark_line().encode(\n x=alt.X('Round:Q', axis=alt.Axis(tickCount=ticks, grid=False)),\n y=f'Offer:Q',\n color='Game ID:N',\n opacity=alt.value(0.5)\n )\n line_chart_ev = None\n if st.checkbox('Show Expected Value'):\n line_chart_ev = 
alt.Chart(data).mark_line().encode(\n x=alt.X('Round:Q', axis=alt.Axis(tickCount=ticks, grid=False)),\n y=f'Board Average:Q',\n color='Game ID:N',\n opacity=alt.value(0.2)\n )\n\n nearest = alt.selection(type='single', nearest=True, on='mouseover', fields=['Round'], empty='none')\n selectors = alt.Chart(data).mark_point().encode(\n x='Round:Q',\n opacity=alt.value(0),\n ).add_selection(\n nearest\n )\n points = line_chart.mark_point().encode(\n opacity=alt.condition(nearest, alt.value(1), alt.value(0))\n )\n text = line_chart.mark_text(align='left', dx=5, dy=-5).encode(\n text=alt.condition(nearest, 'Game ID:Q', alt.value(' '))\n )\n\n # Draw a rule at the location of the selection\n rules = alt.Chart(data).mark_rule(color='gray').encode(\n x='Round:Q',\n ).transform_filter(\n nearest\n )\n average_line = alt.Chart(averages).mark_line().encode(\n x=alt.X('Round:Q', axis=alt.Axis(tickCount=ticks, grid=False)),\n y=f'Offer:Q',\n color=alt.value('lightgray'),\n opacity=alt.value(0)\n )\n deals_series = alt.Chart(deals).mark_circle().encode(\n x=alt.X('Round:Q', axis=alt.Axis(tickCount=ticks, grid=False)),\n y=f'Amount Won:Q',\n color=alt.value('red'),\n opacity=alt.value(0)\n )\n\n if st.checkbox('Show Deals'):\n deals_series = alt.Chart(deals).mark_circle().encode(\n x=alt.X('Round:Q', axis=alt.Axis(tickCount=ticks, grid=False)),\n y=f'Amount Won:Q',\n color='Game ID:N',\n size=alt.value(60),\n opacity=alt.value(1)\n )\n if st.checkbox('Show Average'):\n average_line = alt.Chart(averages).mark_line().encode(\n x=alt.X('Round:Q', axis=alt.Axis(tickCount=ticks, grid=False)),\n y=f'Offer:Q',\n color='Game ID:N',\n opacity=alt.value(1)\n )\n if line_chart_ev:\n layers = alt.layer(line_chart, line_chart_ev, selectors, points, rules, text, average_line,\n deals_series).properties(\n width=900,\n height=600\n )\n else:\n layers = alt.layer(line_chart, selectors, points, rules, text, average_line, deals_series).properties(\n width=900,\n height=600\n )\n st.altair_chart(layers)\n\n\ndef probability_multi_line(data: pd.DataFrame, averages: pd.DataFrame):\n if data.shape[0] > 10:\n ticks = 10\n else:\n ticks = data.shape[0]\n averages.reset_index(inplace=True)\n line_chart = alt.Chart(data).mark_line().encode(\n x=alt.X('Round:Q', axis=alt.Axis(tickCount=ticks, grid=False)),\n y=f'Offer Percent of Average:Q',\n color='Game ID:N',\n opacity=alt.value(0.5)\n )\n\n nearest = alt.selection(type='single', nearest=True, on='mouseover', fields=['Round'], empty='none')\n selectors = alt.Chart(data).mark_point().encode(\n x='Round:Q',\n opacity=alt.value(0),\n ).add_selection(\n nearest\n )\n points = line_chart.mark_point().encode(\n opacity=alt.condition(nearest, alt.value(1), alt.value(0))\n )\n text = line_chart.mark_text(align='left', dx=5, dy=-5).encode(\n text=alt.condition(nearest, 'Game ID:Q', alt.value(' '))\n )\n\n # Draw a rule at the location of the selection\n rules = alt.Chart(data).mark_rule(color='gray').encode(\n x='Round:Q',\n ).transform_filter(\n nearest\n )\n average_line = alt.Chart(averages).mark_line().encode(\n x=alt.X('Round:Q', axis=alt.Axis(tickCount=ticks, grid=False)),\n y=f'Offer Percent of Average:Q',\n color=alt.value('red'),\n opacity=alt.value(1)\n )\n layers = alt.layer(line_chart, selectors, points, rules, text, average_line).properties(\n width=900,\n height=600\n )\n st.altair_chart(layers)\n\n\ndef box_plot(data: pd.DataFrame, feature: str, width: int = 800, height: int = 400):\n boxplot = alt.Chart(data).mark_boxplot().encode(\n x='Round:O',\n y=f'{feature}:Q'\n 
).properties(\n width=width,\n height=height\n )\n st.altair_chart(boxplot)\n\n\ndef single_line(data: pd.DataFrame, game_id: str, preds: list = [], width: int = 800, height: int = 400):\n pred_df = pd.DataFrame(preds)\n line = data[data['Game ID'] == game_id]\n if line.shape[0] > 10:\n ticks = 10\n else:\n ticks = line.shape[0]\n offers = alt.Chart(line).mark_line().encode(\n x=alt.X('Round:Q', axis=alt.Axis(tickCount=ticks, grid=False)),\n y=f'Offer:Q',\n color=alt.value('red'),\n opacity=alt.value(1)\n )\n expected = alt.Chart(line).mark_line().encode(\n x=alt.X('Round:Q', axis=alt.Axis(tickCount=ticks, grid=False)),\n y=f'Board Average:Q',\n color=alt.value('gray'),\n opacity=alt.value(.75)\n )\n if len(preds) > 0:\n models = list(set(val for dic in preds for val in dic.values()))\n predictions = alt.Chart(pred_df).mark_line().encode(\n x=alt.X('Round:Q', axis=alt.Axis(tickCount=ticks, grid=False)),\n y=f'Prediction:Q',\n color='Model',\n opacity=alt.value(0.4)\n )\n nearest = alt.selection(type='single', nearest=True, on='mouseover', fields=['Round'], empty='none')\n selectors = alt.Chart(data).mark_point().encode(\n x='Round:Q',\n opacity=alt.value(0),\n ).add_selection(\n nearest\n )\n points = predictions.mark_point().encode(\n opacity=alt.condition(nearest, alt.value(1), alt.value(0))\n )\n text = predictions.mark_text(align='left', dx=5, dy=-5).encode(\n text=alt.condition(nearest, 'Model', alt.value(' '))\n )\n rules = alt.Chart(data).mark_rule(color='gray').encode(\n x='Round:Q',\n ).transform_filter(\n nearest\n )\n layers = alt.layer(offers, expected, predictions, selectors, points, text, rules).properties(\n width=width,\n height=height\n )\n else:\n layers = alt.layer(offers, expected).properties(\n width=width,\n height=height\n )\n st.altair_chart(layers)\n\n\ndef single_line_offers(data: pd.DataFrame, game_id: str, width: int = 800, height: int = 400):\n line = data[data['Game ID'] == game_id]\n if line.shape[0] > 10:\n ticks = 10\n else:\n ticks = line.shape[0]\n offers = alt.Chart(line).mark_line().encode(\n x=alt.X('Round:Q', axis=alt.Axis(tickCount=ticks, grid=False)),\n y=f'Offer:Q',\n color=alt.value('red'),\n opacity=alt.value(1)\n )\n expected = alt.Chart(line).mark_line().encode(\n x=alt.X('Round:Q', axis=alt.Axis(tickCount=ticks, grid=False)),\n y=f'Board Average:Q',\n color=alt.value('gray'),\n opacity=alt.value(.75)\n )\n nearest = alt.selection(type='single', nearest=True, on='mouseover', fields=['Round'], empty='none')\n selectors = alt.Chart(data).mark_point().encode(\n x='Round:Q',\n opacity=alt.value(0),\n ).add_selection(\n nearest\n )\n points = offers.mark_point().encode(\n opacity=alt.condition(nearest, alt.value(1), alt.value(0))\n )\n text = offers.mark_text(align='left', dx=5, dy=-5).encode(\n text=alt.condition(nearest, 'Offer', alt.value(' '))\n )\n rules = alt.Chart(data).mark_rule(color='gray').encode(\n x='Round:Q',\n ).transform_filter(\n nearest\n )\n layers = alt.layer(offers, expected, selectors, points, text, rules).properties(\n width=width,\n height=height\n )\n st.altair_chart(layers)\n\n\ndef profiling(data: pd.DataFrame):\n data.reset_index(inplace=True)\n # data.drop(['index', 'Game ID'], axis=1, inplace=True)\n pr = ProfileReport(data, explorative=True)\n st_profile_report(pr)\n\n\ndef offers_vs_winnings(data: pd.DataFrame):\n best_offers = data.groupby('Game ID')\n game_best_offers = best_offers['Offer'].max()\n game_amount_won = best_offers['Amount Won'].max()\n\n potential_loss = game_amount_won - game_best_offers\n games = 
pd.concat([game_best_offers, game_amount_won, potential_loss], axis=1)\n games.rename(columns={0: \"Unrealized Winnings\"}, inplace=True, index=str)\n games.reset_index(inplace=True)\n # games.merge(ids)\n st.write(games)\n st.bar_chart(games['Unrealized Winnings'])\n st.write(games['Amount Won'].mean())\n # st.write(games['Unrealized Winnings'].mean())\n # st.write(games['Unrealized Winnings'].median())\n\n # for i, row, in data.iterrows():\n # print(row)\n # if row['Amount Won'] != 0:\n # potential_loss =\n\n\ndef offers(data: pd.DataFrame):\n offer_data = data[['Game ID', 'Round', 'Offer', 'Board Average', 'Deal', 'Amount Won']]\n averages = offer_data.groupby('Round').mean()\n offers_vs_winnings(data)\n\n if st.checkbox('Show Averages'):\n deals = offer_data[offer_data[\"Deal\"]]\n # offer_data.drop(['Deal'], inplace=True, axis=1)\n # averages.drop(['Deal'], inplace=True, axis=1)\n st.subheader('When do people take deals?')\n deals_rounds = np.histogram(\n deals['Round'], bins=11, range=(0, 10))[0]\n st.bar_chart(deals_rounds)\n st.subheader('Average Offers by Round')\n st.line_chart(averages)\n averages['% of Expected Value'] = averages['Offer'] / averages['Board Average']\n st.line_chart(averages['% of Expected Value'])\n if st.checkbox('Show Averages Data'):\n st.table(averages)\n final_round = st.slider('Final Round', min_value=1, max_value=10, step=1, value=9)\n min_offer, max_offer = st.slider('Offer Range', min_value=0, max_value=2000000, step=1000, value=[0, 2000000])\n game = str(int(st.number_input('Game', min_value=0, max_value=1000, step=1)))\n\n if len(game) == 1:\n filter_game_id = '000' + game\n elif len(game) == 2:\n filter_game_id = '00' + game\n else:\n filter_game_id = '0' + game\n\n if filter_game_id == '0000':\n offer_data = offer_data.loc[((offer_data['Round'] <= final_round) &\n (offer_data['Offer'] <= max_offer) &\n (offer_data['Offer'] >= min_offer))]\n else:\n offer_data = offer_data.loc[offer_data['Game ID'] == filter_game_id]\n multi_line(offer_data, averages)\n\n probability_data = data[\n ['Game ID', 'Round', 'Offer', 'Offer Percent of Average', 'Probability of Big Value', 'Amount Won']]\n probability_averages = offer_data.groupby('Round').mean()\n\n probability_multi_line(probability_data, probability_averages)\n # box_plot(probability_data, 'Offer Percent of Average')\n # box_plot(probability_data, 'Offer')\n\n\nif __name__ == '__main__':\n # offers()\n pass\n", "repo_name": "jstock29/dealnodeal", "sub_path": "visualization.py", "file_name": "visualization.py", "file_ext": "py", "file_size_in_byte": 11820, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "41", "api": [{"api_name": "pandas.DataFrame", "line_number": 11, "usage_type": "attribute"}, {"api_name": "streamlit.subheader", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "streamlit.write", "line_number": 14, "usage_type": "call"}, {"api_name": "seaborn.heatmap", "line_number": 14, "usage_type": "call"}, {"api_name": "streamlit.pyplot", "line_number": 15, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 18, "usage_type": "attribute"}, {"api_name": "altair.Chart", "line_number": 25, "usage_type": "call"}, {"api_name": "altair.X", "line_number": 26, "usage_type": "call"}, {"api_name": "altair.Axis", "line_number": 26, "usage_type": "call"}, {"api_name": 
"altair.value", "line_number": 29, "usage_type": "call"}, {"api_name": "streamlit.checkbox", "line_number": 32, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 33, "usage_type": "call"}, {"api_name": "altair.X", "line_number": 34, "usage_type": "call"}, {"api_name": "altair.Axis", "line_number": 34, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 37, "usage_type": "call"}, {"api_name": "altair.selection", "line_number": 40, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 41, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 43, "usage_type": "call"}, {"api_name": "altair.condition", "line_number": 48, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 48, "usage_type": "call"}, {"api_name": "altair.condition", "line_number": 51, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 51, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 55, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 60, "usage_type": "call"}, {"api_name": "altair.X", "line_number": 61, "usage_type": "call"}, {"api_name": "altair.Axis", "line_number": 61, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 63, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 64, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 66, "usage_type": "call"}, {"api_name": "altair.X", "line_number": 67, "usage_type": "call"}, {"api_name": "altair.Axis", "line_number": 67, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 69, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 70, "usage_type": "call"}, {"api_name": "streamlit.checkbox", "line_number": 73, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 74, "usage_type": "call"}, {"api_name": "altair.X", "line_number": 75, "usage_type": "call"}, {"api_name": "altair.Axis", "line_number": 75, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 78, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 79, "usage_type": "call"}, {"api_name": "streamlit.checkbox", "line_number": 81, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 82, "usage_type": "call"}, {"api_name": "altair.X", "line_number": 83, "usage_type": "call"}, {"api_name": "altair.Axis", "line_number": 83, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 86, "usage_type": "call"}, {"api_name": "altair.layer", "line_number": 89, "usage_type": "call"}, {"api_name": "altair.layer", "line_number": 95, "usage_type": "call"}, {"api_name": "streamlit.altair_chart", "line_number": 99, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 102, "usage_type": "attribute"}, {"api_name": "altair.Chart", "line_number": 108, "usage_type": "call"}, {"api_name": "altair.X", "line_number": 109, "usage_type": "call"}, {"api_name": "altair.Axis", "line_number": 109, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 112, "usage_type": "call"}, {"api_name": "altair.selection", "line_number": 115, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 116, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 118, "usage_type": "call"}, {"api_name": "altair.condition", "line_number": 123, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 123, "usage_type": "call"}, {"api_name": "altair.condition", "line_number": 126, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 
126, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 130, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 135, "usage_type": "call"}, {"api_name": "altair.X", "line_number": 136, "usage_type": "call"}, {"api_name": "altair.Axis", "line_number": 136, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 138, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 139, "usage_type": "call"}, {"api_name": "altair.layer", "line_number": 141, "usage_type": "call"}, {"api_name": "streamlit.altair_chart", "line_number": 145, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 148, "usage_type": "attribute"}, {"api_name": "altair.Chart", "line_number": 149, "usage_type": "call"}, {"api_name": "streamlit.altair_chart", "line_number": 156, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 159, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 160, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 166, "usage_type": "call"}, {"api_name": "altair.X", "line_number": 167, "usage_type": "call"}, {"api_name": "altair.Axis", "line_number": 167, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 169, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 170, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 172, "usage_type": "call"}, {"api_name": "altair.X", "line_number": 173, "usage_type": "call"}, {"api_name": "altair.Axis", "line_number": 173, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 175, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 176, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 180, "usage_type": "call"}, {"api_name": "altair.X", "line_number": 181, "usage_type": "call"}, {"api_name": "altair.Axis", "line_number": 181, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 184, "usage_type": "call"}, {"api_name": "altair.selection", "line_number": 186, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 187, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 189, "usage_type": "call"}, {"api_name": "altair.condition", "line_number": 194, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 194, "usage_type": "call"}, {"api_name": "altair.condition", "line_number": 197, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 197, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 199, "usage_type": "call"}, {"api_name": "altair.layer", "line_number": 204, "usage_type": "call"}, {"api_name": "altair.layer", "line_number": 209, "usage_type": "call"}, {"api_name": "streamlit.altair_chart", "line_number": 213, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 216, "usage_type": "attribute"}, {"api_name": "altair.Chart", "line_number": 222, "usage_type": "call"}, {"api_name": "altair.X", "line_number": 223, "usage_type": "call"}, {"api_name": "altair.Axis", "line_number": 223, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 225, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 226, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 228, "usage_type": "call"}, {"api_name": "altair.X", "line_number": 229, "usage_type": "call"}, {"api_name": "altair.Axis", "line_number": 229, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 231, "usage_type": "call"}, {"api_name": 
"altair.value", "line_number": 232, "usage_type": "call"}, {"api_name": "altair.selection", "line_number": 234, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 235, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 237, "usage_type": "call"}, {"api_name": "altair.condition", "line_number": 242, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 242, "usage_type": "call"}, {"api_name": "altair.condition", "line_number": 245, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 245, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 247, "usage_type": "call"}, {"api_name": "altair.layer", "line_number": 252, "usage_type": "call"}, {"api_name": "streamlit.altair_chart", "line_number": 256, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 259, "usage_type": "attribute"}, {"api_name": "pandas_profiling.ProfileReport", "line_number": 262, "usage_type": "call"}, {"api_name": "streamlit_pandas_profiling.st_profile_report", "line_number": 263, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 266, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 272, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 276, "usage_type": "call"}, {"api_name": "streamlit.bar_chart", "line_number": 277, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 278, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 288, "usage_type": "attribute"}, {"api_name": "streamlit.checkbox", "line_number": 293, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 297, "usage_type": "call"}, {"api_name": "numpy.histogram", "line_number": 298, "usage_type": "call"}, {"api_name": "streamlit.bar_chart", "line_number": 300, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 301, "usage_type": "call"}, {"api_name": "streamlit.line_chart", "line_number": 302, "usage_type": "call"}, {"api_name": "streamlit.line_chart", "line_number": 304, "usage_type": "call"}, {"api_name": "streamlit.checkbox", "line_number": 305, "usage_type": "call"}, {"api_name": "streamlit.table", "line_number": 306, "usage_type": "call"}, {"api_name": "streamlit.slider", "line_number": 307, "usage_type": "call"}, {"api_name": "streamlit.slider", "line_number": 308, "usage_type": "call"}, {"api_name": "streamlit.number_input", "line_number": 309, "usage_type": "call"}]} +{"seq_id": "24756853572", "text": "import tweepy\nfrom Credentials import consumer_key, consumer_secret, access_token, access_token_secret\nimport time\nimport random\n\n# Login and open the API\ndef authentication():\n global api\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth)\n\n# Verify authentication\ndef verify_authentication():\n api.verify_credentials()\n print(\"Authentication OK \\n\")\n\n# Open and read the txt file with list of questions and answers\ndef read_text():\n global question_list, answer_list\n questions = open(\"Cards_Against_Humanity_questions.txt\").read()\n answers = open(\"Cards_Against_Humanity_answers.txt\").read()\n\n question_list = questions.split(\"\\n\")\n answer_list = answers.split(\"\\n\")\n\n# Function to combine random questions with rando answer\ndef create_tweet():\n global tweet\n question = random.choice(question_list)\n if \"?\" in question:\n tweet = (question.replace(\"ANSW\", random.choice(answer_list).strip(\".\")))\n 
else:\n tweet = (question.replace(\"ANSW\", (random.choice(answer_list).lower()).strip(\".\")))\n\n# Send the tweet to the Twitter bot\ndef send_tweet_to_bot():\n api.update_status(tweet)\n\n# Run all the previous functios\ndef main():\n authentication()\n verify_authentication()\n read_text()\n create_tweet()\n send_tweet_to_bot()\n\nwhile True:\n main()\n\n # Text of the tweet\n print(tweet, \"\\n\")\n\n # Time delay of 1h\n time.sleep(3600)\n\n# Add other function inclduing automatic replies to users (create random answer function)\n", "repo_name": "lucavillani/Bots_Against_Humanity", "sub_path": "Main.py", "file_name": "Main.py", "file_ext": "py", "file_size_in_byte": 1591, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "tweepy.OAuthHandler", "line_number": 9, "usage_type": "call"}, {"api_name": "Credentials.consumer_key", "line_number": 9, "usage_type": "argument"}, {"api_name": "Credentials.consumer_secret", "line_number": 9, "usage_type": "argument"}, {"api_name": "Credentials.access_token", "line_number": 10, "usage_type": "argument"}, {"api_name": "Credentials.access_token_secret", "line_number": 10, "usage_type": "argument"}, {"api_name": "tweepy.API", "line_number": 11, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 30, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 32, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 34, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "12899952278", "text": "\"\"\"\nДан json файл. Найдите в нём все поля \"updated\" и поменяйте значение на текущие дату и время в формате ISO 8601.\n\"\"\"\n\nimport json\nimport datetime\n\ndata = {}\n\n\ndef update_updated_fields(data):\n for key, item in data.items():\n if isinstance(item, dict):\n update_updated_fields(item)\n elif key == 'updated':\n data[key] = datetime.datetime.now().isoformat()\n\n\nwith open('file.json', 'r', encoding='UTF-8') as file:\n data.update(json.load(file))\n update_updated_fields(data)\n\nwith open('file.json', 'w', encoding='UTF-8') as file:\n json.dump(data, file, indent=4, ensure_ascii=False)\n", "repo_name": "DaryaKhatsuk/Test_from_bpelectric", "sub_path": "task_four_updated-fields.py", "file_name": "task_four_updated-fields.py", "file_ext": "py", "file_size_in_byte": 701, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "datetime.datetime.now", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 16, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 20, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "42314074607", "text": "\"\"\"Settings for running tests\"\"\"\n\nimport os\n\nfrom .utils import is_postgres\n\nDEBUG = True\nUSE_TZ = True\nTIME_ZONE = \"UTC\"\nSECRET_KEY = \"secret_key\"\n\nif is_postgres():\n DATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"HOST\": os.environ.get(\"POSTGRES_HOST\", \"127.0.0.1\"),\n \"PORT\": os.environ.get(\"POSTGRES_PORT\", \"5432\"),\n \"NAME\": \"postgres\",\n \"USER\": \"postgres\",\n \"PASSWORD\": \"postgres\",\n \"TEST\": {\n \"NAME\": \"test_postgres\",\n },\n }\n }\nelse:\n DATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": \"db.sqlite3\",\n \"OPTIONS\": {\"timeout\": 
50},\n \"TEST\": {\n \"NAME\": \"db-test.sqlite3\",\n },\n }\n }\n\nINSTALLED_APPS = [\n \"django_toosimple_q.tests.concurrency\",\n \"django_toosimple_q.tests.demo\",\n \"django_toosimple_q\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.messages\",\n \"django.contrib.sessions\",\n \"django.contrib.staticfiles\",\n]\n\nROOT_URLCONF = \"django_toosimple_q.tests.urls\"\n\nMIDDLEWARE = (\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n)\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"django.template.context_processors.request\",\n ]\n },\n }\n]\n\nSTATIC_URL = \"/static/\"\n", "repo_name": "olivierdalang/django-toosimple-q", "sub_path": "django_toosimple_q/tests/settings.py", "file_name": "settings.py", "file_ext": "py", "file_size_in_byte": 1841, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 31, "dataset": "github-code", "pt": "33", "api": [{"api_name": "utils.is_postgres", "line_number": 12, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 16, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 17, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 17, "usage_type": "attribute"}]} +{"seq_id": "73005810013", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.quantized as quant\nfrom quantize import QuantizedLinear, quantize, dequantize\n\n\ndef mae(x, y):\n return torch.abs(x - y).mean()\n\n\ndef mse(x, y):\n return ((x-y)*(x-y)).mean()\n\n\ndef q_dq(x, n_bits=8):\n a, b = x.max(), x.min()\n n_nums = (1 << n_bits)-1\n s = (a-b)/n_nums\n z = ((-b) / s).round().to(torch.int)\n dq = quant.DeQuantize()(quant.Quantize(s, z, torch.quint8)(x))\n return dq\n\n\ndef compare_Linear_vs_QuantizedLinear(x, hidden):\n linear = nn.Linear(hidden, hidden+1)\n ql = QuantizedLinear(hidden, hidden+1)\n ql.bias = linear.bias\n print(linear.weight)\n q, s, z = quantize(linear.weight)\n print(q)\n ql.weight = nn.Parameter(q)\n ql.scale = nn.Parameter(s)\n ql.zero_point = nn.Parameter(z)\n\n y = linear(x)\n\n q_y = ql._forward(x)\n\n print(\"compare_Linear_vs_QuantizedLinear\")\n print(\"mae:\")\n print(mae(y, q_y))\n print(\"mse:\")\n print(mse(y, q_y))\n print()\n print()\n\n\ndef compare_x_vs_quantized(x):\n q, s, z = quantize(x)\n dq = dequantize(q, s, z)\n\n print(\"compare_x_vs_quantized\")\n print(\"mae:\")\n print(mae(x, dq))\n print(\"mse:\")\n print(mse(x, dq))\n print()\n print()\n\n\ndef compare_x_vs_torch_quantized(x):\n dq = q_dq(x)\n\n print(\"compare_x_vs_torch_quantized\")\n print(\"mae:\")\n print(mae(x, dq))\n print(\"mse:\")\n print(mse(x, dq))\n print()\n print()\n\n\ndef compare_QLinear_vs_QuantLinear(x, h):\n qx = torch.quantize_per_tensor(x, 1.0, 0, torch.quint8)\n quantl = quant.Linear(h, h)(qx)\n print(quantl)\n\n\nif __name__ == \"__main__\":\n h = 10\n x = torch.randn(h, h)\n compare_Linear_vs_QuantizedLinear(x, h)\n # compare_x_vs_quantized(x)\n # compare_x_vs_torch_quantized(x)\n", "repo_name": "hayman42/ft-bert-pyt", "sub_path": "compare.py", "file_name": "compare.py", "file_ext": "py", "file_size_in_byte": 1761, 
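# --- Editor's aside (not part of the compare.py record above): q_dq picks
# scale s = (max - min) / (2**n_bits - 1) and zero point z = round(-min / s).
# A hedged numpy sketch of the same affine quantize/dequantize round trip;
# affine_roundtrip is an illustrative name only.
import numpy as np

def affine_roundtrip(x, n_bits=8):
    levels = (1 << n_bits) - 1
    s = (float(x.max()) - float(x.min())) / levels or 1.0  # guard constant input
    z = round(-float(x.min()) / s)
    q = np.clip(np.round(x / s) + z, 0, levels)  # integer quantization levels
    return (q - z) * s  # dequantized approximation; compare with mae/mse above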
"program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "torch.abs", "line_number": 8, "usage_type": "call"}, {"api_name": "torch.int", "line_number": 19, "usage_type": "attribute"}, {"api_name": "torch.nn.quantized.DeQuantize", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn.quantized", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.quantized.Quantize", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.quint8", "line_number": 20, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "quantize.QuantizedLinear", "line_number": 26, "usage_type": "call"}, {"api_name": "quantize.quantize", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 33, "usage_type": "name"}, {"api_name": "quantize.quantize", "line_number": 49, "usage_type": "call"}, {"api_name": "quantize.dequantize", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.quantize_per_tensor", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.quint8", "line_number": 74, "usage_type": "attribute"}, {"api_name": "torch.nn.quantized.Linear", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn.quantized", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.randn", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "43360008389", "text": "# https://leetcode.com/problems/binary-tree-preorder-traversal/\n#\n# Time Complexity: O(n)\n# Space Complexity: O(n)\n#\n\nfrom typing import List\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n def preorderTraversal(self, root: TreeNode) -> List[int]:\n nodes = []\n return self.iterative(root, nodes)\n def recursive(self, root, nodes):\n if not root:\n return []\n nodes.append(root.val)\n if root.left:\n self.recursive(root.left, nodes)\n if root.right:\n self.recursive(root.right, nodes)\n return nodes\n \n def _compute(self, node, result):\n result.append(node.val)\n \n def iterative(self, root, nodes):\n stack = []\n current = root\n while current or stack:\n if current:\n self._compute(current, nodes)\n stack.append(current)\n current = current.left\n else:\n node = stack.pop()\n current = node.right\n return nodes\n", "repo_name": "tracebyte/LeetCode", "sub_path": "python/BinaryTreePreorderTraversal.py", "file_name": "BinaryTreePreorderTraversal.py", "file_ext": "py", "file_size_in_byte": 1118, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "typing.List", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "9542595552", "text": "import torch\nimport csv\nfrom torch.utils.data import Dataset\n\nfrom Constants import *\n\nclass SequenceDataset(Dataset):\n def __init__(self, dataset_file_path, tokenizer, device):\n # Read JSON file and assign to headlines variable (list of strings)\n self.data_dict = []\n self.device = device\n self.lable_set = set()\n file_data = []\n 
for file in dataset_file_path:\n            with open(file) as csvfile:\n                csv_reader = csv.reader(csvfile)\n                #file_header = next(csv_reader)\n                for row in csv_reader:\n                    file_data.append(row)\n\n        for row in file_data:\n            data = []\n            self.lable_set.add(row[0])\n            data.append(row[0])\n            data.append(row[1])\n            self.data_dict.append(data)\n        self.tokenizer = tokenizer\n        self.tag2id = self.set2id(self.lable_set)\n        print(self.tag2id)\n\n    def __len__(self):\n        return len(self.data_dict)\n\n    def __getitem__(self, index):\n        DEVICE = self.device\n        input = {}\n        label, line = self.data_dict[index]\n        label = self.tag2id[label]\n        tokens = self.tokenizer(line, padding=\"max_length\", truncation=True)\n        input['labels'] = label\n        for k, v in tokens.items():\n            input[k] = torch.tensor(v, dtype=torch.long, device=DEVICE)\n\n        return input\n\n\n    def set2id(self, item_set, pad=None, unk=None):\n        item2id = {}\n        if pad is not None:\n            item2id[pad] = 0\n        if unk is not None:\n            item2id[unk] = 1\n\n        for item in item_set:\n            item2id[item] = len(item2id)\n\n        return item2id", "repo_name": "nkzhlee/Data_Augmentation", "sub_path": "DataModules.py", "file_name": "DataModules.py", "file_ext": "py", "file_size_in_byte": 1677, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 7, "usage_type": "name"}, {"api_name": "csv.reader", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 42, "usage_type": "attribute"}]}
+{"seq_id": "32491125434", "text": "from collections import defaultdict, Counter\n\n\ndef solve(input):\n    adj = defaultdict(list)\n    for l in input.split(\"\\n\"):\n        u, _, v = l.partition(\"-\")\n        adj[u].append(v)\n        adj[v].append(u)\n\n    def dfs(u, ctr):\n        if u == \"end\":\n            return 1\n        twice = any(v == 2 for k, v in ctr.items() if k.islower())\n        n = 0\n        for v in adj[u]:\n            if v != \"start\":\n                if v.islower():\n                    lim = 1 if twice else 2\n                    if ctr[v] < lim:\n                        nxt = ctr.copy()\n                        nxt[v] += 1\n                        n += dfs(v, nxt)\n                else:\n                    n += dfs(v, ctr)\n        return n\n\n    return dfs(\"start\", Counter())\n\n\nwith open(\"input.txt\") as f:\n    print(solve(f.read().strip()))\n", "repo_name": "jo3-l/advent", "sub_path": "2021/12/p2.py", "file_name": "p2.py", "file_ext": "py", "file_size_in_byte": 820, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "33", "api": [{"api_name": "collections.defaultdict", "line_number": 5, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 28, "usage_type": "call"}]}
+{"seq_id": "26683821444", "text": "from flask import Flask,redirect,url_for,render_template, request\nimport random\nimport os\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(\"__main__\")\n#change directory\n#UPLOAD_FOLDER = '/Users/tonywang/Desktop/Interface/FloodNet/static'\n#app.config['IMAGE_FOLDER'] = UPLOAD_FOLDER\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db'\ndb = SQLAlchemy(app)\nselected_all_paths = []\n#set the maximum number of times an image is shown in the web interface\nmax_shown = 3\n\nclass Image(db.Model):\n    imageID = db.Column(db.String(255), primary_key=True)\n    #how many times the image has been selected\n    status = db.Column(db.Integer, default=0)\n    #how many times the image has been retrieved\n    count = db.Column(db.Integer, default=0)\n    #status / count, click-through rate\n    ctr = db.Column(db.Float, default=0.0)\n    #reach_max is set once the image has been shown the maximum number of times in the web interface\n    reach_max = db.Column(db.Boolean, default=False)\n    \n\n\n\n@app.before_first_request\ndef create_tables():\n    #rewrite db every run \n    if os.path.exists('database.db'):\n        os.remove('database.db')\n    db.create_all()\n#app.debug = True\n@app.route(\"/\")\ndef home():\n    image_paths = get_random_images(9)\n    global selected_all_paths\n    selected_all_paths = image_paths\n    print(image_paths)\n    return render_template(\"randomImage.html\", image_paths=image_paths)\n\ndef get_random_images(num_images):\n    image_folder = 'static'\n    image_files = os.listdir(image_folder)\n    image_files = [file for file in image_files if not file.startswith('.DS_Store')]\n    random.shuffle(image_files)\n    selected_images = image_files[:num_images]\n    image_paths = [os.path.join(image_folder, img_file) for img_file in selected_images]\n\n    # Filter out image paths where reach_max is True\n    image_paths = [path for path in image_paths if not Image.query.filter_by(imageID=path, reach_max=True).first()]\n\n    return image_paths\n\n\n\n\n\n\n\n\n@app.route(\"/update_status\", methods=[\"POST\"])\ndef update_status():\n    selected_paths = request.form.getlist(\"image_paths\")\n    selected_images = [path.split(\"|\")[1] for path in selected_paths]\n    print(selected_images,\"HERE!!!!!!!!\")\n    # Update the database with the image paths\n    for path in selected_paths:\n        image_path, image_id = path.split(\"|\")\n        image = Image.query.get(image_id)\n        if image:\n            image.status += 1\n            print(f\"Updated image {image.imageID} - New status: {image.status}\")\n        else:\n            new_image = Image(imageID=image_id, status=1)\n            db.session.add(new_image)\n            print(f\"Added new image {new_image.imageID} - Status: {new_image.status}\")\n\n    for path in selected_all_paths:\n        image_id = path\n        image = Image.query.get(image_id)\n        if image:\n            image.count += 1\n            image.ctr = image.status / image.count  # Calculate CTR\n            #set up reach_max (maximum number of times an image is shown in the web interface)\n            if image.count > max_shown:\n                image.reach_max = True\n            print(f\"Updated image {image.imageID} - New count: {image.count}\")\n        else:\n            new_image = Image(imageID=image_id, count=1)\n            db.session.add(new_image)\n            print(f\"Added new image {new_image.imageID} - Count: {new_image.count}\")\n\n    db.session.commit()\n    \n    return redirect(url_for(\"home\"))\n\n\n\n@app.route(\"/<name>\")\ndef user(name):\n    return f\"Hello {name}!\"\n\n@app.route(\"/home\")\ndef test():\n    return render_template(\"home.html\")\n\n@app.route(\"/image_folder_content\")\ndef image_folder_content():\n    image_folder = app.config['IMAGE_FOLDER']\n    image_files = os.listdir(image_folder)\n    return \"<br>
\".join(image_files)\n\n@app.route(\"/database\")\ndef view_database():\n images = Image.query.all()\n return render_template(\"database.html\", images=images)\n@app.route(\"/admin\")\ndef admin():\n return redirect(url_for(\"user\", name = \"dawei\"))\nif __name__ == \"__main__\":\n app.run(debug=True)", "repo_name": "Dawei0328/FloodNet-2023", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 3990, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 43, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 47, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.request.form.getlist", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 67, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 67, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 99, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 99, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 109, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 114, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 120, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 123, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 123, "usage_type": "call"}]} +{"seq_id": "20846386949", "text": "import base64\nimport hashlib\nimport hmac\nimport time\nfrom urllib.parse import urlencode\n\nimport httpx\n\nfrom constants import API_PRIVATE_PATH, PRIVATE_API_KEY, PRIVATE_API_URL, PUBLIC_API_KEY\n\n\nclass PrivateAPI:\n def _get_kraken_signature(self, urlpath, data):\n post_data = urlencode(data)\n nonce = data['nonce']\n encoded_data = (nonce + post_data).encode()\n hashed_data = hashlib.sha256(encoded_data).digest()\n message = urlpath.encode() + hashed_data\n decoded_private_key = base64.b64decode(PRIVATE_API_KEY)\n mac = hmac.new(decoded_private_key, message, hashlib.sha512).digest()\n signature_digest = base64.b64encode(mac).decode()\n return signature_digest\n\n def _get_nonce(self):\n return str(time.time_ns())\n \n async def _get_private_api_response(self, endpoint, data={}) -> httpx.Response:\n data.update({\n 'nonce': self._get_nonce()\n })\n headers = {\n 'API-Key': PUBLIC_API_KEY,\n 'API-Sign': self._get_kraken_signature(f'{API_PRIVATE_PATH}{endpoint}', data)\n }\n resp = None\n async with httpx.AsyncClient() as client:\n resp = await client.post(f'{PRIVATE_API_URL}{endpoint}', headers=headers, data=data)\n return resp\n ", "repo_name": "AeroStarCreations/Krak", "sub_path": "private_api.py", "file_name": "private_api.py", "file_ext": "py", "file_size_in_byte": 1296, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "urllib.parse.urlencode", 
"line_number": 14, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 17, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 19, "usage_type": "call"}, {"api_name": "constants.PRIVATE_API_KEY", "line_number": 19, "usage_type": "argument"}, {"api_name": "hmac.new", "line_number": 20, "usage_type": "call"}, {"api_name": "hashlib.sha512", "line_number": 20, "usage_type": "attribute"}, {"api_name": "base64.b64encode", "line_number": 21, "usage_type": "call"}, {"api_name": "time.time_ns", "line_number": 25, "usage_type": "call"}, {"api_name": "constants.PUBLIC_API_KEY", "line_number": 32, "usage_type": "name"}, {"api_name": "constants.API_PRIVATE_PATH", "line_number": 33, "usage_type": "name"}, {"api_name": "httpx.AsyncClient", "line_number": 36, "usage_type": "call"}, {"api_name": "constants.PRIVATE_API_URL", "line_number": 37, "usage_type": "name"}, {"api_name": "httpx.Response", "line_number": 27, "usage_type": "attribute"}]} +{"seq_id": "10087514150", "text": "import re\nimport os\nimport numpy as np\nfrom tools import reverse_seq\n\ndef load_position(file):\n\n f = open(file,'r')\n line = f.readline()\n\n fw_join = open(file+'_position.txt', 'w')\n\n while line:\n if line.startswith('>'):\n id = line.split(' ')[0]\n result = re.search(r'location=(.*)', line).group().replace('..>','..').replace('<','')\n if 'join' in result:\n position = result.split('join(')[1].split(')')[0]\n if 'complement' in result:\n fw_join.write(id + '\\t' + '1' + '\\n')\n fw_join.write(position + '\\n')\n else:\n fw_join.write(id + '\\t' + '0' + '\\n')\n fw_join.write(position + '\\n')\n line = f.readline()\n\n fw_join.close()\n\n f.close()\n\n\ndef load_seq(file):\n\n f = open(file, 'r')\n line = f.readline()\n\n fw_seq = open(file + '_seq.txt', 'w')\n\n while line:\n if line.startswith('>'):\n id = line.split(' ')[0]\n fw_seq.write(id+'\\n')\n else:\n fw_seq.write(line)\n line = f.readline()\n\n\n fw_seq.close()\n\n f.close()\n\n\ndef load_position_info(positionfile):\n \"\"\"\n Reads Alternetive Splicing position information\n :param positionfile: File path to position information\n :return: position dictionary\n key: AS id e.g. >lcl|NW_015379189.1_mrna_XM_015765349.1_50431\n value: [complementary marker, [int number position marker]] e.g. ['0', [3401, 4025, 4160, 4408, 4516, 5122, 5221, 5288, 5384, 5446, 5531, 5689]]\n \"\"\"\n position = dict()\n\n f = open(positionfile, 'r')\n line = f.readline()\n while line:\n if line.startswith('>'):\n id = line.strip().split('\\t')[0]\n comp = line.strip().split('\\t')[1]\n tmp = [int(x) for x in f.readline().strip().replace(',', '..').split('..')]\n position[id] = [comp, tmp]\n # print(id)\n # print(position[id])\n line = f.readline()\n f.close()\n\n return position\n\n\ndef load_sequences_segment(seqdir):\n \"\"\"\n Reads genomic segmented sequences\n :param seqdir: 8000bp genomic sequences segment file path\n :return: sequences dictionary\n key: genomic sequences segments id e.g. >NC_029260.1 1\n value: segment sequences e.g. 
'cctaaaccctaaaccctaaaccctaaaccctaaacc'\n \"\"\"\n id2seq = dict()\n\n seqfiles = []\n for ro, dirs, files in os.walk(seqdir, True):\n for i in files:\n if i.endswith('_segment.fa'):\n seqfiles.append(seqdir + i)\n for seqfile in seqfiles:\n f = open(seqfile, 'r')\n line = f.readline()\n id = ''\n while line:\n if line.startswith('>'):\n id = line.strip()\n id2seq[id] = ''\n line = f.readline()\n continue\n id2seq[id] += line.strip()\n line = f.readline()\n f.close()\n\n return id2seq\n\ndef extract_dna_outer_dna_inner(pos,searchlist,id2seq,framesize):\n \"\"\"\n Extracting junction sequences by position\n :param pos: AS position list e.g. [3401, 4025, 4160, 4408, 4516, 5122, 5221, 5288, 5384, 5446, 5531, 5689]\n :param searchlist: targeted genomic segment sequences decided by boundary of AS\n :param id2seq: genomic segmented sequences\n :param framesize: chosen AS framesize, default 30bp*2\n :return: dna_out : list of outerside sequences\n dna_in : list of inside sequences\n \"\"\"\n sequences = ''\n # print(searchlist)\n base_position = int(searchlist[0].strip().split('\\t')[-1])\n\n for i in searchlist:\n # print(i)\n sequences += id2seq[i]\n\n dna_out = []\n dna_in = []\n # print(pos)\n # print(len(sequences))\n for i in range(len(pos)): # pos[1] number checked, minimal 51\n if i == 0: # first position\n if pos[i+1] - pos[i] + 1 < framesize : # RNA_interval_less_than_framesize\n\n if pos[i] < framesize: # DNA_head_position_less_than_framesize\n dna_out.append('N' * (framesize - len(sequences[:pos[i]])) + sequences[:pos[i]]) # dna_left_outer\n dna_in.append(sequences[pos[i]:pos[i+1]+1] + 'N'*(framesize-len(sequences[pos[i]:pos[i+1]+1]))) # dna_left_inner\n else: # DNA_head_position_larger_than_framesize\n dna_out.append(sequences[pos[i]-base_position-framesize:pos[i]-base_position])\n dna_in.append(sequences[pos[i]-base_position:pos[i + 1] - base_position + 1] + 'N'*(framesize-len(sequences[pos[i]-base_position:pos[i + 1] - base_position + 1])))\n\n else: # RNA_interval_larger_than_framesize\n\n if pos[i] < framesize:\n dna_out.append('N' * (framesize - len(sequences[:pos[i]])) + sequences[:pos[i]])\n dna_in.append(sequences[pos[i]:pos[i] + framesize])\n else:\n dna_out.append(sequences[pos[i]-base_position-framesize:pos[i]-base_position])\n dna_in.append(sequences[pos[i]-base_position:pos[i]-base_position + framesize])\n\n if i % 2 == 0 and i != 0: # even position\n if pos[i+1] - pos[i] + 1 < framesize : # RNA_interval_less_than_framesize\n\n if pos[i] - pos[i-1] < framesize: # DNA_interval_less_than_framesize\n dna_out.append('N' * (framesize - len(sequences[pos[i-1]-base_position:pos[i]-base_position])) + sequences[pos[i-1]-base_position:pos[i]-base_position]) # dna_left_outer\n dna_in.append(sequences[pos[i]-base_position:pos[i+1]-base_position+1] + 'N'*(framesize-len(sequences[pos[i]-base_position:pos[i+1]-base_position+1]))) # dna_left_inner\n else: # DNA_interval_larger_than_framesize\n dna_out.append(sequences[pos[i]-base_position-framesize:pos[i]-base_position])\n dna_in.append(sequences[pos[i]-base_position:pos[i + 1] - base_position + 1] + 'N'*(framesize-len(sequences[pos[i]-base_position:pos[i + 1] - base_position + 1])))\n\n else: # RNA_interval_larger_than_framesize\n\n if pos[i] - pos[i - 1] < framesize: # DNA_interval_less_than_framesize\n dna_out.append('N' * (framesize - len(sequences[pos[i-1]-base_position:pos[i]-base_position])) + sequences[pos[i-1]-base_position:pos[i]-base_position]) # dna_left_outer\n 
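# NOTE: windows shorter than framesize are padded with 'N'; base_position re-anchors absolute genomic positions onto the concatenated 8000bp segment string\n                    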
dna_in.append(sequences[pos[i]-base_position:pos[i]-base_position + framesize])\n else:# DNA_interval_larger_than_framesize\n dna_out.append(sequences[pos[i]-base_position-framesize:pos[i]-base_position])\n dna_in.append(sequences[pos[i]-base_position:pos[i]-base_position + framesize])\n\n if i % 2 == 1 and i != len(pos)-1: # odd position\n if pos[i]-pos[i-1] 0:\n pos_min = (pos[0] - framesize)//8000\n else:\n pos_min = 0\n pos_max = (pos[-1] + framesize)//8000\n # print(item)\n # print(genome_id)\n # print(pos)\n # print(pos_min)\n # print(pos_max)\n # print('\\n')\n search_id = []\n if pos_max == pos_min:\n search_id.append('>' + genome_id + '\\t' + str(pos_min * 8000 + 1)) # >NC_001320.1\t1\n else:\n for i in range(pos_max-pos_min + 1):\n search_id.append('>' + genome_id + '\\t' + str((pos_min + i)*8000 + 1)) # >NC_001320.1\t1\n search_dict[item] = search_id\n\n fw = open('./search_index_file.txt','w')\n for i in search_dict:\n fw.write(i+'\\n')\n fw.write('\\t'.join(search_dict[i])+'\\n')\n fw.close()\n return search_dict\n\ndef load_junction_seqs(seqdir,positionfile,framesize):\n # output junction sequences\n position = load_position_info(positionfile)\n\n search_index = search_coordinator(position, framesize)\n\n id2seq = load_sequences_segment(seqdir)\n\n fw = open('./search_index_file_sequences.txt', 'w')\n for item in search_index:\n fw.write(item + '\\n')\n id = ''\n seqs = ''\n for i in search_index[item]:\n id += i + '||'\n seqs += id2seq[i]\n fw.write(id + '\\n')\n fw.write(seqs + '\\n')\n fw.close()\n\n fw = open('./GCF_001433935.1_IRGSP-1.0_genomic.fna_DNA_outer_DNA_inner.fa', 'w')\n for item in position:\n if item in search_index:\n searchlist = search_index[item]\n # print(searchlist)\n else:\n continue\n comp = position[item][0]\n pos = position[item][1]\n dna_outer,dna_inner = extract_dna_outer_dna_inner(pos, searchlist, id2seq, framesize)\n # print(item)\n # print(position[item])\n # print(dna_outer)\n fw.write(item + '\\t'+ comp + '\\n')\n if comp == '1':\n re_inner = [reverse_seq(x) for x in dna_inner[::-1]]\n re_outer = [reverse_seq(x) for x in dna_outer[::-1]]\n for i in range(len(re_outer) // 2):\n fw.write(re_outer[2 * i]+'\\t'+re_inner[2 * i]+'\\t'+re_inner[2 * i + 1]+'\\t'+re_outer[2 * i + 1]+'\\n')\n else:\n for i in range(len(dna_outer) // 2):\n fw.write(dna_outer[2 * i]+'\\t'+dna_inner[2 * i]+'\\t'+dna_inner[2 * i + 1]+'\\t'+dna_outer[2 * i + 1]+'\\n')\n fw.close()\n\n\n# def load_dna_outer(seqdir,positionfile,framesize):\n# position = dict()\n# id2seq = dict()\n#\n# f = open(positionfile, 'r')\n# line = f.readline()\n# while line:\n# if line.startswith('>'):\n# id = line.strip().split('\\t')[0]\n# comp = line.strip().split('\\t')[1]\n# tmp = [int(x) for x in f.readline().strip().replace(',', '..').split('..')]\n# position[id] = [[comp],tmp]\n# # print(id)\n# # print(position[id])\n# # # > lcl | NW_015379189.1_mrna_XM_015765349.1_50431\n# # [['0'], [3401, 4025, 4160, 4408, 4516, 5122, 5221, 5288, 5384, 5446, 5531, 5689]]\n#\n# line = f.readline()\n# f.close()\n#\n# search_index = search_coordinator(position,framesize)\n#\n# seqfiles = []\n# for ro, dirs, files in os.walk(seqdir, True):\n# for i in files:\n# if i.endswith('_segment.fa'):\n# seqfiles.append(seqdir + i)\n# for seqfile in seqfiles:\n# f = open(seqfile,'r')\n# line = f.readline()\n# id = ''\n# while line:\n# if line.startswith('>'):\n# id = line.strip()\n# id2seq[id] = ''\n# line = f.readline()\n# continue\n# id2seq[id] += line.strip()\n# line = f.readline()\n# # >NC_029260.1 1\n# # 
cctaaaccctaaaccctaaaccctaaaccctaaacc\n# f.close()\n#\n# fw = open('./search_index_file_sequences.txt','w')\n# for item in search_index:\n# fw.write(item + '\\n')\n# id = ''\n# seqs = ''\n# for i in search_index[item]:\n# id += i + '||'\n# seqs += id2seq[i]\n# fw.write(id + '\\n')\n# fw.write(seqs + '\\n')\n# fw.close()\n#\n# fw = open('./GCF_001433935.1_IRGSP-1.0_genomic.fna_DNA_outer.fa', 'w')\n# for item in position:\n# if item in search_index:\n# searchlist = search_index[item]\n# print(searchlist)\n# else:\n# continue\n# # print(searchlist)\n# comp = position[item][0]\n# pos = position[item][1]\n# dna_outer = extract_dna_outer(pos,searchlist,id2seq,framesize)\n# # print(item)\n# # print(position[item])\n# # print(dna_outer)\n# fw.write(item + '\\n')\n# if comp == '1':\n# temp = [x[::-1] for x in dna_outer[::-1]]\n# for i in range(len(temp)//2):\n# fw.write(temp[2*i] + '\\t' + temp[2*i+1] + '\\n' )\n# else:\n# temp = dna_outer\n# for i in range(len(temp) // 2):\n# fw.write(temp[2 * i] + '\\t' + temp[2 * i + 1] + '\\n')\n# fw.close()\n#\n#\n# def extract_dna_outer(pos,searchlist,id2seq,framesize):\n# base_position = 0\n# sequences = ''\n# # print(searchlist)\n# base_position = int(searchlist[0].strip().split('\\t')[-1])\n#\n# for i in searchlist:\n# # print(i)\n# sequences += id2seq[i]\n#\n# dna_out = []\n# # print(pos)\n# # print(len(sequences))\n# for i in range(len(pos)):\n# if i % 2 == 0 :\n# # left outer\n# if pos[i] < framesize:\n# dna_out.append('N'*(framesize-len(sequences[:pos[i]])) + sequences[:pos[i]])\n# else:\n# dna_out.append(sequences[pos[i]-base_position-framesize:pos[i]-base_position])\n# else:\n# # right outer\n# dna_out.append(sequences[pos[i]-base_position+1:pos[i]-base_position+framesize+2])\n#\n# return dna_out\n\n\n\n\n\n\n# def genomic_interval(file):\n# f = open(file,'r')\n# lines = f.readlines()\n# f.close()\n# interval = 100000\n# cnt = 0\n# for line in lines:\n# if line.startswith('>'):\n# pass\n# else:\n# tmp = [int(x) for x in line.strip().replace(',','..').split('..')]\n# for i in range(int(len(tmp)/2)):\n# if i == 0:\n# continue\n# inter = tmp[2*i]-tmp[2*i-1] + 1\n# if inter < 30:\n# cnt += 1\n# if interval > inter:\n# interval = inter\n# print(interval)\n# print(cnt)\n# # minimal interval = 3\n# # for GCF_001433935.1_IRGSP-1.0_rna_from_genomic.fa_position.txt\n\n\ndef cut_genomic_file(file):\n # cut whole genome file into single fasta marker file\n f = open(file,'r')\n lines = f.readlines()\n f.close()\n\n seqs = {}\n id = ''\n for line in lines:\n if line.startswith('>'):\n id = line.strip().split(' ')[0]\n seqs[id] = []\n continue\n seqs[id].append(line)\n\n for item in seqs:\n fw = open('./genome_db/'+item.replace('>','')+'.fa','w')\n fw.write(item+'\\n')\n for i in seqs[item]:\n fw.write(i)\n fw.close()\n\ndef load_db_list(root):\n # load single fasta files path\n filelist = []\n for ro, dirs, files in os.walk(root, True):\n for i in files:\n if i.startswith('N'):\n filelist.append(root + i)\n return filelist\n\ndef genome_coordinator(filelist):\n # cut file into every 8000bp\n for file in filelist:\n f = open(file, 'r')\n line = f.readline()\n\n fw = open(file+'_genome_segment.fa', 'w')\n id = ''\n cntline = 0\n lb = 0\n rb = 0\n seq = ''\n while line:\n if line.startswith('>'):\n cntline = 0\n lb = 0\n rb = 0\n id = line.strip().split(' ')[0] # >NC_029256.1\n if cntline % 100 == 0 and cntline != 0:\n # fw.write(id + '\\t' + str(lb + 1) + '_' + str(rb) + '\\n')\n fw.write(id + '\\t' + str(lb + 1) + '\\n')\n fw.write(seq + '\\n')\n lb = rb\n seq = 
''\n line = f.readline()\n cntline += 1\n seq += line.strip()\n rb += len(line.strip())\n\n # fw.write(id + '\\t' + str(lb + 1) + '_' + str(rb) + '\\n')\n fw.write(id + '\\t' + str(lb + 1) + '\\n')\n fw.write(seq + '\\n')\n\n fw.close()\n f.close()\n\n\ndef scan(file):\n f = open(file,'r')\n line = f.readline()\n max = 0\n min = 10000000\n while line:\n if line.startswith('>'):\n line = ' '.join([' '.join(x.split('..')) for x in f.readline().split('\\t')[0].split(',')])\n num1 = int(line.split(' ')[0])\n num2 = int(line.split(' ')[-1])\n if num2 > num1:\n if num2 > max:\n max = num2\n if num1 < min:\n min = num1\n else:\n print('oo')\n if num1 > max:\n max = num1\n if num2 < min:\n min = num2\n line = f.readline()\n print(max)\n print(min)\n\ndef count_position_number(file):\n f = open(file,'r')\n lines = f.readlines()\n f.close()\n\n for line in lines:\n if line.startswith('>'):\n continue\n else:\n tmp = line.strip().replace(',','..').split('..')\n if len(tmp) % 2 ==0:\n if int(tmp[1])<100:\n print(line)\n else:\n print(line)\n\ndef negative_sample(seqdir,positionfile,framesize):\n position = load_position_info(positionfile)\n\n search_index = search_coordinator(position, framesize)\n\n id2seq = load_sequences_segment(seqdir)\n\n AS_segment = set()\n genomic_segment = set()\n for item in search_index:\n for j in search_index[item]:\n AS_segment.add(j)\n for item in id2seq:\n genomic_segment.add(item)\n\n # print(len(genomic_segment)) # 46833\n # print(len(AS_segment)) # 28338\n # print(len(genomic_segment-AS_segment)) # 18495\n temp = dict()\n for seqid in genomic_segment-AS_segment:\n nega_seq = cut_negative_seq(id2seq[seqid],framesize)\n temp[seqid] = nega_seq\n\n fw= open('./negative_sample.txt','w')\n for item in temp:\n for i in range(len(temp[item])//2):\n fw.write(item+'\\t'+'junction_'+str(i+1)+'\\t')\n fw.write(temp[item][2*i]+'\\t'+temp[item][2*i+1]+'\\t'+'0'+'\\n')\n fw.close()\n\n\ndef cut_negative_seq(seq,framesize):\n index = np.random.choice(len(seq)-2*framesize-1,56,replace=False)\n nega_seq = []\n for i in index:\n nega_seq.append(seq[i:i+framesize]+'\\t'+seq[i+framesize:i+2*framesize])\n return nega_seq\n\n\n\n\n\ndef chech_title(file):\n f = open(file, 'r')\n lines = f.readlines()\n f.close()\n header = set()\n for line in lines:\n if line.startswith('>'):\n header.add(line.strip().split('_')[2])\n print(header)\n # {'mrna', 'miscrna', 'ncrna', 'trna'}\n\ndef positive_sample(file):\n f = open(file, 'r')\n line = f.readline()\n\n AS_pool = dict()\n id = ''\n while line:\n if line.startswith('>'):\n id = line.strip()\n AS_pool[id] = []\n line = f.readline()\n AS_pool[id].append(line.strip())\n line = f.readline()\n f.close()\n\n fw = open(file+'_positive_sample.txt', 'w')\n for item in AS_pool:\n if item.split('_')[2] == 'mrna':\n for i in range(len(AS_pool[item])):\n fw.write(item + '\\t' + 'junction_'+str(i+1)+'\\t')\n fw.write(AS_pool[item][i]+'\\t'+'1'+'\\n')\n fw.close()\n # output more than 257196 AS positive item\n\n\nif __name__=='__main__':\n # load_position('/home/david/Desktop/asdecoder/GCF_001433935.1_IRGSP-1.0_rna_from_genomic.fa')\n # load_seq('/home/david/Desktop/asdecoder/GCF_001433935.1_IRGSP-1.0_rna_from_genomic.fa')\n\n # scan('/home/david/Desktop/asdecoder/GCF_001433935.1_IRGSP-1.0_rna_from_genomic.fa_position.txt')\n\n # load_rna_inner('/home/david/Desktop/asdecoder/GCF_001433935.1_IRGSP-1.0_rna_from_genomic.fa_seq.txt',\n # '/home/david/Desktop/asdecoder/GCF_001433935.1_IRGSP-1.0_rna_from_genomic.fa_position.txt',\n # 30)\n\n # 
genomic_interval('./GCF_001433935.1_IRGSP-1.0_rna_from_genomic.fa_position.txt')\n # genome_coordinator('/home/david/Desktop/asdecoder/genome_db/GCF_001433935.1_IRGSP-1.0_genomic.fna')\n\n # load_dna_outer('/home/david/Desktop/asdecoder/genome_db/GCF_001433935.1_IRGSP-1.0_genomic.fna_genome_segment.fa',\n # '/home/david/Desktop/asdecoder/GCF_001433935.1_IRGSP-1.0_rna_from_genomic.fa_position.txt',\n # 30)\n\n\n\n # load_junction_seqs('./genome_db/segment/',\n # './GCF_001433935.1_IRGSP-1.0_rna_from_genomic.fa_position.txt',\n # 30)\n\n # count_position_number('./GCF_001433935.1_IRGSP-1.0_rna_from_genomic.fa_position.txt')\n\n # Cut genomic file and build coordinator\n # cut_genomic_file('./genome_db/GCF_001433935.1_IRGSP-1.0_genomic.fna')\n # filelist = load_db_list('./genome_db/')\n # genome_coordinator(filelist)\n\n negative_sample('./genome_db/segment/',\n './GCF_001433935.1_IRGSP-1.0_rna_from_genomic.fa_position.txt',\n 30)\n\n # chech_title('./GCF_001433935.1_IRGSP-1.0_genomic.fna_DNA_outer_DNA_inner.fa')\n\n # positive_sample('./GCF_001433935.1_IRGSP-1.0_genomic.fna_DNA_outer_DNA_inner.fa')", "repo_name": "xiaolouge123/AS_decoder-", "sub_path": "preprocess.py", "file_name": "preprocess.py", "file_ext": "py", "file_size_in_byte": 22988, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "33", "api": [{"api_name": "re.search", "line_number": 16, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 90, "usage_type": "call"}, {"api_name": "tools.reverse_seq", "line_number": 267, "usage_type": "call"}, {"api_name": "tools.reverse_seq", "line_number": 268, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 439, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 555, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 555, "usage_type": "attribute"}]} +{"seq_id": "9079700356", "text": "from conans import ConanFile, CMake\nfrom conans.tools import unzip, replace_in_file\n\nclass NanaConan(ConanFile):\n name = \"nana\"\n generators = \"cmake\"\n version = \"1.4.0\"\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {\"enable_audio\" : [True, False], \"enable_png\" : [True, False], \"enable_jpeg\" : [True, False]}\n default_options = \"enable_audio=False\", \"enable_png=False\", \"enable_jpeg=False\"\n license = \"Boost\"\n url = \"https://github.com/MojaveWastelander/conan_nana\"\n \n def source(self):\n self.run(\"git clone https://github.com/cnjinhao/nana.git\")\n \n def requirements(self):\n if self.options.enable_jpeg:\n self.requires(\"libjpeg-turbo/1.4.2@lasote/stable\")\n \n if self.options.enable_png:\n self.requires(\"libpng/1.6.23@lasote/stable\")\n\n def build(self):\n cmake = CMake(self.settings)\n print(\"Compiler: %s %s\" % (self.settings.compiler, self.settings.compiler.version))\n print(\"Arch: %s\" % self.settings.arch) \n lib_opt = \"-DCMAKE_DEBUG_POSTFIX:STRING={0} -DCMAKE_RELEASE_POSTFIX:STRING={0}\".format(\"r\" if self.settings.build_type == \"Release\" else \"d\") \n replace_lines = '''cmake_minimum_required(VERSION 2.8)\ninclude(../conanbuildinfo.cmake)\nconan_basic_setup()\n'''\n replace_in_file(\"nana/CMakeLists.txt\", \"cmake_minimum_required(VERSION 2.8)\", replace_lines)\n\n # process options\n if self.options.enable_audio:\n lib_opt += \" -DNANA_CMAKE_ENABLE_AUDIO:BOOL=ON\"\n \n if self.options.enable_png:\n lib_opt += \" -DNANA_CMAKE_ENABLE_PNG:BOOL=ON\"\n \n if self.options.enable_jpeg:\n lib_opt += \" 
-DNANA_CMAKE_ENABLE_JPEG:BOOL=ON\"\n\n self.run('cmake %s/nana %s %s' % (self.conanfile_directory, cmake.command_line, lib_opt))\n self.run(\"cmake --build . %s\" % cmake.build_config)\n\n\n def package(self):\n self.copy(\"*\", dst=\"include\", src=\"nana/include\")\n self.copy(\"*\", dst=\"source\", src=\"nana/source\")\n self.copy(\"*.lib\", dst=\"lib\", src=\"Release\")\n self.copy(\"*.lib\", dst=\"lib\", src=\"Debug\")\n self.copy(\"*.lib\", dst=\"lib\", src=\"lib\")\n self.copy(\"*.a\", dst=\"lib\", src=\"lib\")\n\n def package_info(self):\n print(\"Compiler: %s %s\" % (self.settings.compiler, self.settings.compiler.version))\n print(\"Arch: %s\" % self.settings.arch) \n print(\"Build_type: %s\" % self.settings.build_type) \n if self.settings.compiler == \"Visual Studio\":\n print(\"Runtime: %s\" % self.settings.compiler.runtime)\n self.cpp_info.libs = [\"nana%s\" % (\"r\" if self.settings.build_type == \"Release\" else \"d\")]\n", "repo_name": "MojaveWastelander/conan_nana", "sub_path": "conanfile.py", "file_name": "conanfile.py", "file_ext": "py", "file_size_in_byte": 2702, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "33", "api": [{"api_name": "conans.ConanFile", "line_number": 4, "usage_type": "name"}, {"api_name": "conans.CMake", "line_number": 25, "usage_type": "call"}, {"api_name": "conans.tools.replace_in_file", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "28241963777", "text": "#!/usr/bin/env python3\nfrom flask import Flask, render_template, request, redirect, make_response\nfrom functools import wraps\nimport jwt, uuid, os, logging\n\napp = Flask(__name__)\n\nUSERS = {} # {userID: {'saldo': saldo, 'prodotti': [prodotti acquistati]}}\nSECRET_KEY = os.urandom(36).hex()\nassert len(SECRET_KEY) == 36 * 2\n\n'''\nThe `OBJECTS` dictionary contains information about the products available for purchase in the PaguriShop web application. 
Each product is represented by a dictionary with the following keys:\n\n- `id`: a unique identifier for the product\n- `name`: the name of the product\n- `foto`: the filename of the product's image\n- `description`: a short description of the product\n- `text`: additional information about the product\n- `price`: the price of the product in PaguriShop's currency (not specified in the code)\n\nThe `OBJECTS` dictionary is used throughout the application to display information about the available products and to handle purchases made by users.\n'''\nOBJECTS = {\n '1' : {\n 'id' : '1',\n 'name' : 'La flag',\n 'foto' : 'flag.png',\n 'description' : 'Prova a prendermi se ci riesci',\n 'text' : 'flag{Wh3r3_d1d_y0u_g3t_th3_m0n3ys?}',\n 'price' : 1000\n },\n '2' : {\n 'id' : '2',\n 'name' : 'Il gabibbo',\n 'foto' : 'gabibbo.png',\n 'description' : 'Mascotte epica quasi regalata',\n 'text' : 'Bella scelta (meglio della flag) ma adesso ti sta tracciando e tra poco saprà dove abiti',\n 'price' : 10\n },\n '3' : {\n 'id' : '3',\n 'name' : 'I segreti di stato di St3pNy',\n 'foto' : 'stepny.png',\n 'description' : 'Il vero motivo per cui ha evaso le tasse',\n 'text' : 'Non è un vero segreto di stato, semplicemente gli faceva comodo così poteva shoppare su Fortnite',\n 'price' : 500\n }\n}\n\ndef authorized(f):\n \"\"\"\n A decorator function that checks if the user is authorized to access a certain route.\n If the user is not authorized, it creates a new account and sets a cookie with a unique session ID.\n If the user is authorized, it returns the decorated function with the user ID as an argument.\n\n Args:\n f (function): The function to be decorated.\n\n Returns:\n function: The decorated function.\n \"\"\"\n @wraps(f)\n def decorated(*args, **kwargs):\n try:\n token = request.cookies['session']\n except:\n token = None\n if not token:\n token = str(uuid.uuid4())\n jwtEnc = jwt.encode({'userID': token}, SECRET_KEY, algorithm='HS256')\n response = make_response(redirect('/'))\n response.set_cookie('session', jwtEnc)\n USERS[token] = {'saldo': 500, 'prodotti': []}\n my_logger.info(f'[{token}] ha creato un nuovo account')\n return response\n try:\n data = jwt.decode(token, SECRET_KEY, algorithms=['HS256'])\n except:\n token = str(uuid.uuid4())\n jwtEnc = jwt.encode({'userID': token}, SECRET_KEY, algorithm='HS256')\n response = make_response(redirect('/'))\n response.set_cookie('session', jwtEnc)\n USERS[token] = {'saldo': 500, 'prodotti': []}\n my_logger.info(f'[{token}] ha creato un nuovo account')\n return response\n return f(data[\"userID\"], *args, **kwargs)\n return decorated\n\n@app.route('/', methods=['GET'])\n@authorized\ndef index(userID: str):\n \"\"\"\n Renders the index page with a list of objects and the user's saldo.\n\n Args:\n userID (str): The ID of the user.\n\n Returns:\n str: The rendered HTML template.\n \"\"\"\n objects = [x for x in OBJECTS.values()]\n saldo = USERS[userID]['saldo']\n if request.args.get('error') != None:\n return render_template('index.html', error=request.args.get('error'), objects=objects, saldo=str(saldo))\n return render_template('index.html', objects=objects, saldo=str(saldo))\n\n@app.route('/history', methods=['GET'])\n@authorized\ndef history(userID: str):\n \"\"\"\n Renders the history.html template with the list of products purchased by the user and their current balance.\n\n Args:\n userID (str): The ID of the user whose purchase history is being displayed.\n\n Returns:\n The rendered history.html template with the list of purchased products and the 
user's current balance.\n    \"\"\"\n    return render_template('history.html', objects=[OBJECTS[x] for x in USERS[userID]['prodotti']], saldo=str(USERS[userID]['saldo']))\n\n@app.route('/buy', methods=['POST'])\n@authorized\ndef buy(userID: str):\n    \"\"\"\n    This function handles the purchase of products by a user.\n\n    Args:\n        userID (str): The ID of the user making the purchase.\n\n    Returns:\n        A redirect to the user's purchase history page.\n\n    Raises:\n        None\n    \"\"\"\n    id = request.form['id']\n    if id not in ['1', '2', '3']:\n        return make_response(redirect('/?error=Prodotto non valido'))\n    \n    qty = request.form['qty']\n    try:\n        qty = int(qty)\n    except ValueError:\n        return make_response(redirect('/?error=Il carrello contiene una quantità non valida'))\n\n    if USERS[userID]['saldo'] < OBJECTS[id]['price'] * int(qty):\n        return make_response(redirect('/?error=Non hai abbastanza soldi'))\n\n    if qty > 0:\n        USERS[userID]['prodotti'].append(id)\n    #TODO: check that qty is a positive number greater than 0\n    USERS[userID]['saldo'] -= OBJECTS[id]['price'] * int(qty) # update the balance\n    \n    if id == '1':\n        my_logger.info(f'[{userID}] ha aggiunto la flag al carrello') # let's see if it works\n    \n    return redirect('/history')\n\n# Only watch my own logs (otherwise the console blows up)\nlogging.getLogger(\"werkzeug\").setLevel(logging.ERROR)\nmy_logger = logging.getLogger(__name__)\nmy_logger.setLevel(logging.INFO)\nmy_handler = logging.StreamHandler()\nmy_logger.addHandler(my_handler)\n \nif __name__ == '__main__':\n    app.run('0.0.0.0', port=80, debug=True)", "repo_name": "AlBovo/Ministage-2023", "sub_path": "PaguriShop/src/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 6005, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "os.urandom", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.request.cookies", "line_number": 66, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 66, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 70, "usage_type": "call"}, {"api_name": "jwt.encode", "line_number": 71, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 72, "usage_type": "call"}, {"api_name": "jwt.decode", "line_number": 78, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 80, "usage_type": "call"}, {"api_name": "jwt.encode", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 82, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 82, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 104, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 104, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 104, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 105, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 105, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 105, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 105, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 106, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 120, "usage_type": 
"call"}, {"api_name": "flask.request.form", "line_number": 137, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 137, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 139, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 139, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 141, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 141, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 145, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 145, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 148, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 148, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 158, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 161, "usage_type": "call"}, {"api_name": "logging.ERROR", "line_number": 161, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 162, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 163, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 164, "usage_type": "call"}]} +{"seq_id": "10068936916", "text": "import tkinter as tk\nfrom tkinter import ttk\nimport functools\nfrom PIL import Image, ImageTk\nimport re\n\n\nclass CustomDialog(tk.Toplevel):\n def __init__(self, parent, db_dict):\n tk.Toplevel.__init__(self, parent)\n x_pos = parent.winfo_x() + 20\n y_pos = parent.winfo_y() + 20\n\n if 'message' in db_dict:\n msg = db_dict['message']\n else:\n db_dict['message'] = ''\n msg = \"\"\n message = msg\n\n if 'entry_qty' in db_dict:\n self.entry_qty = db_dict['entry_qty']\n else:\n self.entry_qty = 0\n\n if 'entry_per_row' in db_dict:\n entry_per_row = db_dict['entry_per_row']\n else:\n entry_per_row = 1\n\n entry_lines_qty = int(self.entry_qty/entry_per_row)\n # print(f'entry_lines_qty {entry_lines_qty}')\n\n new_lines_qty = message.count('\\n')\n hei = 16*new_lines_qty + 44*entry_lines_qty + 60\n\n minH = 80\n ## set minimum height to minH pixels\n if hei maxW:\n maxW = len(line)\n\n width = maxW * 8\n\n minW = 270\n ## set minimum with to $minW pixels\n if width < minW:\n width = minW\n\n # print(f'self.max {maxW}, width {width}')\n # self.geometry(f'{width}x{hei}+{x_pos}+{y_pos}')\n self.geometry(f'+{x_pos}+{y_pos}')\n self.title(db_dict['title'])\n # self.bind('', lambda event: print(self.geometry()))\n\n self.fr1 = tk.Frame(self)\n fr_img = tk.Frame(self.fr1)\n if re.search(\"tk::icons\", db_dict['icon']):\n use_img_run = db_dict['icon']\n else:\n self.imgRun = Image.open(db_dict['icon'])\n use_img_run = ImageTk.PhotoImage(self.imgRun)\n l_img = tk.Label(fr_img, image=use_img_run)\n l_img.image = use_img_run\n l_img.pack(padx=10, anchor='n')\n\n fr_right = tk.Frame(self.fr1)\n fr_msg = tk.Frame(fr_right)\n l_msg = tk.Label(fr_msg, text=db_dict['message'])\n l_msg.pack(padx=10)\n\n if 'entry_lbl' in db_dict:\n entry_lbl = db_dict['entry_lbl']\n else:\n entry_lbl = \"\"\n if 'entry_frame_bd' in db_dict:\n bd = db_dict['entry_frame_bd']\n else:\n bd = 2\n self.ent_dict = {}\n if self.entry_qty > 0:\n self.list_ents = []\n fr_ent = tk.Frame(fr_right, bd=bd, relief='groove')\n for fi in range(0,self.entry_qty):\n f = tk.Frame(fr_ent, bd=0, relief='groove')\n txt = entry_lbl[fi]\n lab = tk.Label(f, text=txt)\n self.ent_dict[txt] = tk.StringVar()\n # CustomDialog.ent_dict[fi] = self.ent_dict[fi]\n self.list_ents.append(ttk.Entry(f, 
textvariable=self.ent_dict[txt]))\n                # print(f'txt:{len(txt)}, entW:{ent.cget(\"width\")}')\n                self.list_ents[fi].pack(padx=2, side='right', fill='x')\n                self.list_ents[fi].bind(\"<Return>\", functools.partial(self.cmd_ent, fi))  # event string restored; '<Return>' assumed\n                if entry_lbl != \"\":\n                    lab.pack(padx=2, side='right')\n                row = int((fi)/entry_per_row)\n                column = int((fi)%entry_per_row)\n                # print(f'fi:{fi}, txt:{txt}, row:{row} column:{column} entW:{ent.cget(\"width\")}')\n                f.grid(padx=(2, 10), pady=2, row=row, column=column, sticky='e')\n\n        fr_msg.pack()\n        if self.entry_qty > 0:\n            fr_ent.pack(anchor='e', padx=2, pady=2, expand=1)\n\n        fr_img.grid(row=0, column=0)\n        fr_right.grid(row=0, column=1)\n\n        self.frBut = tk.Frame(self)\n        print(f\"buts:{db_dict['type']}\")\n\n        for butn in db_dict['type']:\n            self.but = tk.ttk.Button(self.frBut, text=butn, width=10, command=functools.partial(self.on_but, butn))\n            self.but.bind(\"<Return>\", functools.partial(self.on_but, butn))  # '<Return>' assumed\n            self.but.pack(side=\"left\", padx=2)\n            if 'default' in db_dict:\n                default = db_dict['default']\n            else:\n                default = 0\n            if db_dict['type'].index(butn) == default:\n                self.but.configure(state=\"active\")\n                # self.bind('<Return>', (lambda e, b=self.but: self.but.invoke()))\n                self.but.focus_set()\n                self.default_but = self.but\n\n        if self.entry_qty > 0:\n            self.list_ents[0].focus_set()\n\n        self.fr1.pack(fill=\"both\", padx=2, pady=2)\n\n        self.frBut.pack(side=\"bottom\", fill=\"y\", padx=2, pady=2)\n\n        self.var = tk.StringVar()\n        self.but = \"\"\n\n    def cmd_ent(self, fi, event=None):\n        # print(f'cmd_ent self:{self}, fi:{fi}, entry_qty:{self.entry_qty}, event:{event}')\n        if fi+1 == self.entry_qty:\n            # last entry -> set focus to default button\n            self.default_but.invoke()\n            # pass\n        else:\n            # not last entry -> set focus to next entry\n            self.list_ents[fi+1].focus_set()\n\n    def on_but(self, butn, event=None):\n        # print(f'on_but self:{self}, butn:{butn}, event:{event}')\n        self.but = butn\n        self.destroy()\n    # def on_ok(self, event=None):\n    #     self.but = \"ok\"\n    #     self.destroy()\n    # def ca_ok(self, event=None):\n    #     self.but = \"ca\"\n    #     self.destroy()\n\n    def show(self):\n        self.wm_deiconify()\n        # self.entry.focus_force()\n        self.wait_window()\n        # try:\n        #     print(f'DialogBox show ent_dict:{self.ent_dict}')\n        # except Exception as err:\n        #     print(err)\n        return [self.var.get(), self.but, self.ent_dict]\n\n\nclass Example(tk.Frame):\n    def __init__(self, parent):\n        tk.Frame.__init__(self, parent)\n        self.button = tk.Button(self, text=\"Get Input\", command=self.on_button)\n        self.label1 = tk.Label(self, text=\"\", width=20)\n        self.label2 = tk.Label(self, text=\"\", width=20)\n        self.button.pack(padx=8, pady=8)\n        self.label1.pack(side=\"bottom\", fill=\"both\", expand=True)\n        self.label2.pack(side=\"bottom\", fill=\"both\", expand=True)\n\n    def on_button(self):\n        string, str12, entries = CustomDialog(self, {'title': 'Demo', 'message': \"Enter something:\", 'icon': '::tk::icons::question', 'type': ['ok']}).show()  # show() returns [text, pressed button, entry dict]; minimal config dict assumed from __init__ above\n        self.label1.configure(text=\"You entered:\\n\" + string)\n        self.label2.configure(text=\"You pressed:\\n\" + str12)\n\n\nif __name__ == \"__main__\":\n    root = tk.Tk()\n    root.wm_geometry(\"400x200\")\n    Example(root).pack(fill=\"both\", expand=True)\n    root.mainloop()", "repo_name": "RadIlyaG/SF1p_Py", "sub_path": "dialogBox.py", "file_name": "dialogBox.py", "file_ext": "py", "file_size_in_byte": 6647, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": 
"tkinter.Toplevel", "line_number": 10, "usage_type": "attribute"}, {"api_name": "tkinter.Frame", "line_number": 61, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 62, "usage_type": "call"}, {"api_name": "re.search", "line_number": 63, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 66, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 66, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 67, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 67, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 68, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 72, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 73, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 74, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 88, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 90, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 92, "usage_type": "call"}, {"api_name": "tkinter.StringVar", "line_number": 93, "usage_type": "call"}, {"api_name": "tkinter.ttk.Entry", "line_number": 95, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 95, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 98, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 113, "usage_type": "call"}, {"api_name": "tkinter.ttk.Button", "line_number": 117, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 117, "usage_type": "attribute"}, {"api_name": "functools.partial", "line_number": 117, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 118, "usage_type": "call"}, {"api_name": "tkinter.StringVar", "line_number": 137, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 172, "usage_type": "attribute"}, {"api_name": "tkinter.Frame.__init__", "line_number": 174, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 174, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 175, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 176, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 177, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 189, "usage_type": "call"}]} +{"seq_id": "42401061222", "text": "import json\nimport os\nimport socket\nimport sys\n\nfrom google.protobuf.json_format import MessageToJson\n\nfrom request import continue_connection_establishment, client_data_request\n\nimport Protobuf.RRWorkload_pb2 as workloadpb2\n\n\npath = sys.path[1]\n\n\n# write the data inside the file\ndef write_in_file(data_id, data_type_store, information):\n with open(path + f\"/Protobuf/Result/{data_id}/{data_type_store}_{rfw_id}.json\", \"w\") as file:\n json.dump(MessageToJson(information), file)\n\n\nif __name__ == '__main__':\n PORT = 4000\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # server socket\n\n s.connect((socket.gethostname(), PORT))\n print(\"Client is connected to the server\\n\")\n\n keep_connection_establishment = True\n while keep_connection_establishment:\n # store all the data here after making the request for them\n\n rfw_id, benchmark_type, workload_metric, batch_unit, batch_id, batch_size, data_type = client_data_request()\n\n # a directory is created to store the data\n os.makedirs(path + f\"/Protobuf/Result/{rfw_id}\")\n\n # Serialize the data\n rfw = workloadpb2.RFW(rfw_id=rfw_id, 
benchmark_type=benchmark_type, workload_metric=workload_metric,\n batch_unit=batch_unit, batch_id=batch_id, batch_size=batch_size, batch_type=data_type)\n request = rfw.SerializeToString()\n\n write_in_file(rfw_id, \"rfw\", rfw)\n s.sendall(request)\n print(request)\n\n # receive data\n data = s.recv(1024)\n\n # deserialize\n response = workloadpb2.RFD()\n response.ParseFromString(data)\n print(\"Response : \")\n print(response)\n\n write_in_file(rfw_id, \"rfd\", response)\n\n # if user enters anything expect of yes then program is terminated\n keep_connection_establishment = continue_connection_establishment()\n", "repo_name": "RifadulHaque/TCP_Local", "sub_path": "Protobuf/Client/client_protobuf.py", "file_name": "client_protobuf.py", "file_ext": "py", "file_size_in_byte": 1877, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "sys.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 19, "usage_type": "call"}, {"api_name": "google.protobuf.json_format.MessageToJson", "line_number": 19, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 25, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 25, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 25, "usage_type": "attribute"}, {"api_name": "socket.gethostname", "line_number": 27, "usage_type": "call"}, {"api_name": "request.client_data_request", "line_number": 34, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 37, "usage_type": "call"}, {"api_name": "Protobuf.RRWorkload_pb2.RFW", "line_number": 40, "usage_type": "call"}, {"api_name": "Protobuf.RRWorkload_pb2", "line_number": 40, "usage_type": "name"}, {"api_name": "Protobuf.RRWorkload_pb2.RFD", "line_number": 52, "usage_type": "call"}, {"api_name": "Protobuf.RRWorkload_pb2", "line_number": 52, "usage_type": "name"}, {"api_name": "request.continue_connection_establishment", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "33556515892", "text": "import sys\nfrom copy import copy\nfrom dataclasses import dataclass, field\nfrom itertools import combinations\nfrom typing import List, NamedTuple\n\n\nclass Item(NamedTuple):\n name: str\n cost: int\n damage: int\n armor: int\n\n\n@dataclass\nclass Character:\n hitpoints: int\n damage: int = 0\n armor: int = 0\n _items: List[Item] = field(default_factory=list)\n\n @property\n def items(self) -> List[Item]:\n return self._items\n\n @items.setter\n def items(self, value: List[Item]):\n self._items = value\n for item in self._items:\n self.armor += item.armor\n self.damage += item.damage\n\n @property\n def item_cost(self) -> int:\n return sum(i.cost for i in self._items)\n\n\nweapons: List[Item] = [\n Item('Dagger', 8, 4, 0),\n Item('Shortsword', 10, 5, 0),\n Item('Warhammer', 25, 6, 0),\n Item('Longsword', 40, 7, 0),\n Item('Greataxe', 74, 8, 0),\n]\narmor: List[Item] = [\n Item('Leather', 13, 0, 1),\n Item('Chainmail', 31, 0, 2),\n Item('Splintmail', 53, 0, 3),\n Item('Bandedmail', 75, 0, 4),\n Item('Platemail', 102, 0, 5)\n]\nrings: List[Item] = [\n Item('Damage +1', 25, 1, 0),\n Item('Damage +2', 50, 2, 0),\n Item('Damage +3', 100, 3, 0),\n Item('Defense +1', 20, 0, 1),\n Item('Defense +2', 40, 0, 2),\n Item('Defense +3', 80, 0, 3)\n]\n\nboss_orig = Character(0)\nfor line in sys.stdin:\n value = int(line.split(': ')[1])\n if 'Hit Points' in line:\n boss_orig.hitpoints = value\n elif 'Damage' in line:\n boss_orig.damage = value\n elif 
'Armor' in line:\n boss_orig.armor = value\n\nplayers: List[Character] = []\nfor w in weapons:\n for a in list(combinations(armor, 0)) + list(combinations(armor, 1)):\n for r in list(combinations(rings, 0)) + \\\n list(combinations(rings, 1)) + \\\n list(combinations(rings, 2)):\n items: List[Item] = [w]\n for x in a:\n items.append(x)\n for x in r:\n items.append(x)\n player = Character(100)\n player.items = items\n players.append(player)\n\nwinners: List[Character] = []\nloosers: List[Character] = []\nfor player in players:\n boss = copy(boss_orig)\n player_attacking: bool = True\n while player.hitpoints > 0 and boss.hitpoints > 0:\n if player_attacking:\n boss.hitpoints -= max(player.damage - boss.armor, 1)\n else:\n player.hitpoints -= max(boss.damage - player.armor, 1)\n player_attacking = not player_attacking\n if player.hitpoints > 0:\n winners.append(player)\n else:\n loosers.append(player)\n\nprint('Part 1:', min(p.item_cost for p in winners))\nprint('Part 2:', max(p.item_cost for p in loosers))\n", "repo_name": "anders-ahsman/advent-of-code", "sub_path": "2015/day21/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2777, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "typing.NamedTuple", "line_number": 8, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 20, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 20, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 23, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 27, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 45, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 52, "usage_type": "name"}, {"api_name": "sys.stdin", "line_number": 62, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 71, "usage_type": "name"}, {"api_name": "itertools.combinations", "line_number": 73, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 74, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 75, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 76, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 77, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 86, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 87, "usage_type": "name"}, {"api_name": "copy.copy", "line_number": 89, "usage_type": "call"}]} +{"seq_id": "1206293799", "text": "import json\nimport re\nfrom copy import deepcopy\n\nclass JsonObject(object):\n _version = '0.0.0'\n _pathPrefix = 'obj'\n _defaultConfig = {}\n _defaultFieldType = 'auto'\n _fieldTypes = {\n 'id': int\n }\n _camelCaseRe = re.compile(r\"\"\".+?(?= len(rv):\n return default\n elif not isinstance(rv, dict):\n if default:\n return default\n raise Exception(\"Not a dict %s in '%r': %r\" % (\n step, path, rv))\n elif step not in rv:\n matches = dict([\n (match.replace(\"*\", ''), match)\n for match in rv\n if \"*\" in match\n ])\n found = \"\"\n for match, replace in matches.items():\n if match in step and len(replace) > len(found):\n found = replace\n if found:\n step = found\n else:\n return default\n rv = rv[step]\n return rv\n\n def delPath(self, path, structure=None):\n if not isinstance(path, list):\n path = 
self.splitPath(path)\n\n rv = structure \\\n if structure is not None \\\n else self._config\n\n for step in path[:-1]:\n if isinstance(step, int):\n if not isinstance(rv, list):\n raise Exception(\"Not a list %s in %r\" % (\n step, path))\n elif not isinstance(rv, dict):\n raise Exception(\"Not a dict %s in %r: %r\" % (\n step, path, rv))\n rv = rv[step]\n del rv[path[-1]]\n\n def castFieldType(self, value, path=[], cast=None, debug=False):\n if not isinstance(path, list):\n path = self.splitPath(path)\n if cast is None:\n path = [\n step for step in path\n if not isinstance(step, int) and step != '+'\n ]\n cast = self.getPath(\n path,\n default=self._defaultFieldType,\n structure=self._fieldTypes\n )\n if debug:\n print('Cast', path, '=', cast)\n\n if isinstance(value, list):\n if debug:\n print('list', path)\n return [\n self.castFieldType(v, path, cast, debug)\n for v in value\n ]\n\n if isinstance(value, dict) and (cast == 'auto' or isinstance(cast, dict)):\n if debug:\n print('dict', path)\n return dict([\n (\n step,\n self.castFieldType(\n v,\n path + [step],\n self._getCast(cast, step),\n debug\n )\n )\n for step, v in value.items()\n ])\n\n if cast == int:\n if debug and value in [None, '']:\n print('empty int %s %r' % (path, value))\n if value is None:\n return value\n if value == '':\n return 0\n\n if value in [None, 'None'] or cast == 'auto':\n if debug:\n print('blank', cast, value)\n return value\n\n try:\n if debug:\n print('casting', cast, value)\n return cast(value)\n except Exception as error:\n print(error, path, cast, type(value), value)\n raise\n\n\n def setPath(self, path, value):\n if not isinstance(path, list):\n path = self.splitPath(path)\n\n if path[0] == '_config':\n raise Exception(\"%d Cannot set path %r = %r\" % (\n self.id, path, value\n ))\n\n value = self.castFieldType(value, path)\n\n rv = self._config\n for i in range(len(path)-1):\n step = path[i]\n\n next_type = dict\n if isinstance(path[i+1], int) or path[i+1] == '+':\n next_type = list\n\n if isinstance(step, int) or step == '+':\n if not isinstance(rv, list):\n raise Exception(\"Not a list at %s %r: %r\" % (step, path, rv))\n elif step == '+':\n step = len(rv)\n if step >= len(rv):\n rv.extend([next_type()] * (step - len(rv) + 1))\n else:\n if not isinstance(rv, dict):\n raise Exception(\"Not a dict at %s %r: %r\" % (step, path, rv))\n if step not in rv:\n rv[step] = next_type()\n rv = rv[step]\n step = path[-1]\n if step == '+':\n step = len(rv)\n if step >= len(rv):\n rv.extend([0] * (step - len(rv) + 1))\n rv[step] = value\n return value\n\n\nclass JsonObjectDataMapper(object):\n obj = JsonObject\n table = None\n fields = []\n order = 'id'\n join_tables = {}\n\n def __init__(self, db):\n self._db = db\n\n def _read(self, dbrow):\n if dbrow is None \\\n or not isinstance(dbrow, dict) \\\n or 'config' not in dbrow \\\n or 'id' not in dbrow:\n raise ValueError(\"Invalid dbrow: %r\" % dbrow)\n\n dbrow['config'] = json.loads(dbrow['config'])\n for field in self.fields:\n dbrow['config'][field] = dbrow[field]\n dbrow['config']['id'] = dbrow['id']\n return self.obj(dbrow['config'])\n\n def _write(self, obj):\n if not isinstance(obj, JsonObject):\n raise ValueError(\"Invalid object: %r\" % obj)\n\n fields = self.fields + ['id']\n dbrow = dict(\n (field, obj.getPath(field, None))\n for field in fields\n )\n dbrow['config'] = dict(\n (field, value)\n for field, value in obj\n if field not in fields\n )\n dbrow['config'] = json.dumps(\n dbrow['config'],\n sort_keys=True,\n indent=4\n )\n return dbrow\n\n 
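# CRUD helpers below: create() builds an unsaved object, getById()/getMultiple() read rows back through _read(), and save() dispatches to insert() or update() depending on whether obj.id is set\n    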
def create(self, config=None):\n \"\"\"Creates an object from config\"\"\"\n obj = self.obj(config)\n return obj\n\n def getById(self, obj_id):\n \"\"\"Returns an object from table by obj_id\"\"\"\n with self._db.connect() as db:\n cur = db.execute(\"\"\"\n SELECT *\n FROM `%s`\n WHERE `id` = ?\n \"\"\" % self.table,\n [obj_id]\n )\n obj = cur.fetchone()\n cur.close()\n\n if obj is None:\n return None\n return self._read(dict(obj))\n\n def getMultiple(self, where=\"1\", values={}):\n \"\"\"Returns a list of obj matching the where clause\"\"\"\n with self._db.connect() as db:\n cur = db.execute(\"\"\"\n SELECT * FROM `%s` WHERE %s ORDER BY `%s`\n \"\"\" % (self.table, where, self.order),\n values\n )\n objs = cur.fetchall() or []\n cur.close()\n\n return [\n self._read(dict(obj))\n for obj in objs\n if obj is not None\n ]\n\n def clearJoinTables(self, obj):\n \"\"\"Clears entries from join tables by key=obj.id\"\"\"\n with self._db.connect() as db:\n for table, (attrib, key) in list(self.join_tables.items()):\n db.execute(\"\"\"\n DELETE FROM `%s`\n WHERE `%s` = ?\n \"\"\" % (table, key),\n [\n obj[attrib]\n ]\n )\n db.commit()\n\n def fillJoinTables(self, obj):\n \"\"\"Populates entries in join tables by key=obj.id\"\"\"\n if not self.join_tables:\n return\n raise NotImplementedError(\n \"Needed for %r\" % self.join_tables\n )\n\n def save(self, obj):\n \"\"\"Insert or Update an obj\"\"\"\n if obj.id:\n return self.update(obj)\n return self.insert(obj)\n\n def insert(self, obj):\n \"\"\"Insert a new obj\"\"\"\n new_obj = self._write(obj)\n if 'id' in new_obj:\n del new_obj['id']\n\n with self._db.connect() as db:\n cur = db.execute(\"\"\"\n INSERT INTO `%s`\n (`config`, %s)\n VALUES\n (:config, %s)\n \"\"\" % (\n self.table,\n ', '.join([\"`%s`\" % f for f in new_obj]),\n ', '.join([\":%s\" % f for f in new_obj])\n ),\n new_obj\n )\n db.commit()\n obj.id = cur.lastrowid\n cur.close()\n\n self.fillJoinTables(obj)\n\n return self.getById(cur.lastrowid)\n\n def update(self, obj):\n \"\"\"Updates an existing obj\"\"\"\n new_obj = self._write(obj)\n\n with self._db.connect() as db:\n db.execute(\"\"\"\n UPDATE `%s`\n SET `config` = :config, %s\n WHERE `id` = :id\n \"\"\" % (\n self.table,\n ', '.join([\n \"`%s` = :%s\" % (f, f)\n for f in self.fields\n ])\n ),\n new_obj\n )\n db.commit()\n\n self.clearJoinTables(obj)\n self.fillJoinTables(obj)\n\n return self.getById(obj.id)\n\n def delete(self, obj):\n \"\"\"Deletes an object from the table\"\"\"\n self.clearJoinTables(obj)\n\n with self._db.connect() as db:\n db.execute(\"\"\"\n DELETE\n FROM `%s`\n WHERE `id` = ?\n \"\"\" % self.table,\n [obj.id]\n )\n db.commit()\n", "repo_name": "SebastiaanPasterkamp/dnd-machine", "sub_path": "app/models/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 16257, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "41", "api": [{"api_name": "re.compile", "line_number": 13, "usage_type": "call"}, {"api_name": "re.X", "line_number": 18, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 23, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 42, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 389, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 409, "usage_type": "call"}]} +{"seq_id": "25876977564", "text": "#!/usr/bin/python3\n\nimport tweepy\nfrom textblob import TextBlob\nimport matplotlib.pyplot as plt\n\n# making connection with twitter\n# defining consumer key 
and secret\nc_key=\"mB8A4V55vI5TQfi5DYTpE67AY\"\nc_sec=\"kQNdImsBEbYvWHx0PaEw6Xy4neTjcW4xtQQNxvn9NBdZidOFNb\"\n\n# to search & get info you need to use a token\n# token key & secret\nt_key=\"1003575775767846913-JwvqB6rnq9BecoNs3buryRN8XIPSpQ\"\nt_sec=\"yoajqEJnMIpTWqB4plMpLkMe8NRq1bzAOvHIAjHIQGSmr\"\n\n# connecting twitter API\nauth_session=tweepy.OAuthHandler(c_key,c_sec)\n# print(dir(auth_session))\n\n# setting, sending token key & secret\nauth_session.set_access_token(t_key,t_sec)\n\n# now accessing API\napi_connect=tweepy.API(auth_session)\n\n# searching data\ntopic=api_connect.search('modi')\n#print (topic)\n\nfor i in topic:\n    # tokenized and clean \n    blob_data=TextBlob(i.text)\n    # applying sentiment; output will be polarity \n    print(blob_data.sentiment)\n\n", "repo_name": "aishwaryashand/ML", "sub_path": "data_analyse.py", "file_name": "data_analyse.py", "file_ext": "py", "file_size_in_byte": 904, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "tweepy.OAuthHandler", "line_number": 18, "usage_type": "call"}, {"api_name": "tweepy.API", "line_number": 25, "usage_type": "call"}, {"api_name": "textblob.TextBlob", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "6113219464", "text": "from typing import List, Optional, Tuple\n\nfrom django.conf import settings\nfrom django.db.models import F, Sum\nfrom django.db.models.functions import Length\n\nfrom zerver.models import BotStorageData, UserProfile\n\n\nclass StateError(Exception):\n    pass\n\n\ndef get_bot_storage(bot_profile: UserProfile, key: str) -> str:\n    try:\n        return BotStorageData.objects.get(bot_profile=bot_profile, key=key).value\n    except BotStorageData.DoesNotExist:\n        raise StateError(\"Key does not exist.\")\n\n\ndef get_bot_storage_size(bot_profile: UserProfile, key: Optional[str] = None) -> int:\n    if key is None:\n        return (\n            BotStorageData.objects.filter(bot_profile=bot_profile)\n            .annotate(key_size=Length(\"key\"), value_size=Length(\"value\"))\n            .aggregate(sum=Sum(F(\"key_size\") + F(\"value_size\")))[\"sum\"]\n            or 0\n        )\n    else:\n        try:\n            return len(key) + len(\n                BotStorageData.objects.get(bot_profile=bot_profile, key=key).value\n            )\n        except BotStorageData.DoesNotExist:\n            return 0\n\n\ndef set_bot_storage(bot_profile: UserProfile, entries: List[Tuple[str, str]]) -> None:\n    storage_size_limit = settings.USER_STATE_SIZE_LIMIT\n    storage_size_difference = 0\n    for key, value in entries:\n        assert isinstance(key, str), \"Key type should be str.\"\n        assert isinstance(value, str), \"Value type should be str.\"\n        storage_size_difference += (len(key) + len(value)) - get_bot_storage_size(bot_profile, key)\n    new_storage_size = get_bot_storage_size(bot_profile) + storage_size_difference\n    if new_storage_size > storage_size_limit:\n        raise StateError(\n            \"Request exceeds storage limit by {} characters. 
The limit is {} characters.\".format(\n new_storage_size - storage_size_limit, storage_size_limit\n )\n )\n else:\n for key, value in entries:\n BotStorageData.objects.update_or_create(\n bot_profile=bot_profile, key=key, defaults={\"value\": value}\n )\n\n\ndef remove_bot_storage(bot_profile: UserProfile, keys: List[str]) -> None:\n queryset = BotStorageData.objects.filter(bot_profile=bot_profile, key__in=keys)\n if len(queryset) < len(keys):\n raise StateError(\"Key does not exist.\")\n queryset.delete()\n\n\ndef is_key_in_bot_storage(bot_profile: UserProfile, key: str) -> bool:\n return BotStorageData.objects.filter(bot_profile=bot_profile, key=key).exists()\n\n\ndef get_keys_in_bot_storage(bot_profile: UserProfile) -> List[str]:\n return list(\n BotStorageData.objects.filter(bot_profile=bot_profile).values_list(\"key\", flat=True)\n )\n", "repo_name": "zulip/zulip", "sub_path": "zerver/lib/bot_storage.py", "file_name": "bot_storage.py", "file_ext": "py", "file_size_in_byte": 2667, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 18770, "dataset": "github-code", "pt": "33", "api": [{"api_name": "zerver.models.UserProfile", "line_number": 14, "usage_type": "name"}, {"api_name": "zerver.models.BotStorageData.objects.get", "line_number": 16, "usage_type": "call"}, {"api_name": "zerver.models.BotStorageData.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "zerver.models.BotStorageData", "line_number": 16, "usage_type": "name"}, {"api_name": "zerver.models.BotStorageData.DoesNotExist", "line_number": 17, "usage_type": "attribute"}, {"api_name": "zerver.models.BotStorageData", "line_number": 17, "usage_type": "name"}, {"api_name": "zerver.models.UserProfile", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 21, "usage_type": "name"}, {"api_name": "zerver.models.BotStorageData.objects.filter", "line_number": 24, "usage_type": "call"}, {"api_name": "zerver.models.BotStorageData.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "zerver.models.BotStorageData", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.functions.Length", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models.Sum", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models.F", "line_number": 26, "usage_type": "call"}, {"api_name": "zerver.models.BotStorageData.objects.get", "line_number": 32, "usage_type": "call"}, {"api_name": "zerver.models.BotStorageData.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "zerver.models.BotStorageData", "line_number": 32, "usage_type": "name"}, {"api_name": "zerver.models.BotStorageData.DoesNotExist", "line_number": 34, "usage_type": "attribute"}, {"api_name": "zerver.models.BotStorageData", "line_number": 34, "usage_type": "name"}, {"api_name": "zerver.models.UserProfile", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 38, "usage_type": "name"}, {"api_name": "django.conf.settings.USER_STATE_SIZE_LIMIT", "line_number": 39, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 39, "usage_type": "name"}, {"api_name": "zerver.models.BotStorageData.objects.update_or_create", "line_number": 54, "usage_type": "call"}, {"api_name": "zerver.models.BotStorageData.objects", "line_number": 54, "usage_type": "attribute"}, {"api_name": "zerver.models.BotStorageData", "line_number": 54, 
"usage_type": "name"}, {"api_name": "zerver.models.UserProfile", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 59, "usage_type": "name"}, {"api_name": "zerver.models.BotStorageData.objects.filter", "line_number": 60, "usage_type": "call"}, {"api_name": "zerver.models.BotStorageData.objects", "line_number": 60, "usage_type": "attribute"}, {"api_name": "zerver.models.BotStorageData", "line_number": 60, "usage_type": "name"}, {"api_name": "zerver.models.UserProfile", "line_number": 66, "usage_type": "name"}, {"api_name": "zerver.models.BotStorageData.objects.filter", "line_number": 67, "usage_type": "call"}, {"api_name": "zerver.models.BotStorageData.objects", "line_number": 67, "usage_type": "attribute"}, {"api_name": "zerver.models.BotStorageData", "line_number": 67, "usage_type": "name"}, {"api_name": "zerver.models.UserProfile", "line_number": 70, "usage_type": "name"}, {"api_name": "zerver.models.BotStorageData.objects.filter", "line_number": 72, "usage_type": "call"}, {"api_name": "zerver.models.BotStorageData.objects", "line_number": 72, "usage_type": "attribute"}, {"api_name": "zerver.models.BotStorageData", "line_number": 72, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 70, "usage_type": "name"}]} +{"seq_id": "29824805502", "text": "from cmath import sqrt\nfrom typing import final\nimport numpy as np # Note you can also import functions as abbreviations\nimport matplotlib.pyplot as plt \nimport csv\nfrom sympy import symbols, cos, diff\nimport math\nimport pandas\n\n# data = np.array(np.genfromtxt('SampleDataset.csv', delimiter='')) \ndata = pandas.read_csv('pt1.csv') # genfromtxt extracts data as a DataFrame, you will want to convert it into arrays for convenience.\n\ndata = data.drop(data.columns[[0]], axis=1)\ndata1 = data.values # Extracts only the values of interest\n\n\ndata = np.transpose(data1) # If you just want the array which contains all x data points, and y data points\n\n\nvi = np.array(data[0])\nvf = np.array(data[1])\n\ninituncertain = np.array(data[2])\n\nfinaluncertain = np.array(data[3])\n\nrestitution = []\ncount = 0\nfor ele in vi:\n float(ele)\n for x in vf:\n float(x)\n res = abs(x/ele)\n restitution.append(res)\n\n\nplt.bar(range(10), restitution, color ='maroon',\n width = 0.4)\n \ntotuncertain = []\nfor ele in vf:\n float(ele)\n \n for x in vf:\n float(x)\n for z in restitution:\n float(z)\n for y in finaluncertain:\n float(y)\n for v in inituncertain:\n float(v)\n uncertain = math.sqrt((z/ele)**2)*(v**2)+ ((z/x)**2)*(y**2)\n totuncertain.append(uncertain)\nn = len(vi)\n\nunweighted_mean = sum(restitution)/n\nunweighted_mean_error = math.sqrt((sum((totuncertain[i] - unweighted_mean)**2 for i in range(n)))/(n-1))\nstandard_error_uw= unweighted_mean_error/ math.sqrt(n)\nprint(totuncertain)\nprint(unweighted_mean)\nprint(unweighted_mean_error)\nprint(standard_error_uw)\n\n\n# Using different function, and defining marker and error bar colors, size etc..\n\n# Labels axis\n\n#plt.show()\n## The customization options are only limited by your imagination\n\ne_sub_w = []\nfor i in restitution:\n e_sub_w.append((i/(unweighted_mean**2))/(1/unweighted_mean**2)) \n\n\nsigma_sub_e = 1/pow(unweighted_mean**2,0.5)\n\n\n\nnumerator = 0 #sum((e_sub_j[i] / (e_sub_j_error[i])**2 )for i in range(n))\ndenominator = 0 #sum(1/ (e_sub_j_error[i])**2 for i in range(n))\nfor i in range(n):\n numerator += restitution[i] / (totuncertain[i])**2 \n denominator += 1/ (totuncertain[i])**2\nweighted_mean_e = numerator / denominator 
\n \nstandard_error_on_weighted_mean_e = sum(1/ totuncertain[i]**2 for i in range(n))**(-1/2)\nprint(standard_error_on_weighted_mean_e)\n\n\n# Sets plot size\nplt.figure(figsize=(10,6))\n# Using different function, and defining marker and error bar colors, size etc..\nplt.errorbar(restitution, vi, inituncertain, marker='x', ecolor='black',mec='red', linestyle='None',ms=4, mew=4)\n# Labels axis\nplt.xlabel('coefficient of restitution')\nplt.ylabel('Velocity inital(m$s^{-1}$)')\nplt.title('ej vs Vi')\n\nplt.figure(figsize=(10,6))\nplt.scatter(weighted_mean_e ,standard_error_on_weighted_mean_e)\nplt.show()", "repo_name": "tajhmartin1/PhsyicsLab", "sub_path": "lab1/pt1.py", "file_name": "pt1.py", "file_ext": "py", "file_size_in_byte": 2906, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "pandas.read_csv", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 52, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 57, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.errorbar", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}]} +{"seq_id": "14854151193", "text": "from django.urls import path\r\nfrom main import views\r\n\r\nurlpatterns = [\r\n #main page render\r\n path('', views.renderPage, name='render_admin'),\r\n\r\n #summary page\r\n path('retrieve-summary/', views.displaySummary),\r\n\r\n #answers page\r\n path('display-questions/', views.displayQuestions),\r\n path('display-answers/', views.displayAnswers),\r\n\r\n #questions page\r\n path('save-question/', views.saveQuestion),\r\n path('delete-question/', views.deleteQuestion),\r\n path('display-edit-questions/', views.displayEditQuestions),\r\n #path('add-default/', 
views.addDefault),\r\n\r\n #users page\r\n path('display-users/', views.displayUsers),\r\n path('remove-user/', views.removeUser),\r\n \r\n #account page\r\n path('display-company/', views.displayCompany),\r\n path('reset-key/', views.resetKey),\r\n #path('change-password/', views.changePassword),\r\n #path('change-email/', views.changeEmail),\r\n path('change-name/', views.changeName),\r\n path('deactivate/', views.deactivateCompany),\r\n path('reactivate/', views.reactivateCompany),\r\n\r\n #logs user out of admin page\r\n path('logout/', views.logoutUser)\r\n]\r\n", "repo_name": "kalebweston100/Covid-Screening-App", "sub_path": "screening/main/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1152, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "main.views.renderPage", "line_number": 6, "usage_type": "attribute"}, {"api_name": "main.views", "line_number": 6, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "main.views.displaySummary", "line_number": 9, "usage_type": "attribute"}, {"api_name": "main.views", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "main.views.displayQuestions", "line_number": 12, "usage_type": "attribute"}, {"api_name": "main.views", "line_number": 12, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "main.views.displayAnswers", "line_number": 13, "usage_type": "attribute"}, {"api_name": "main.views", "line_number": 13, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "main.views.saveQuestion", "line_number": 16, "usage_type": "attribute"}, {"api_name": "main.views", "line_number": 16, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "main.views.deleteQuestion", "line_number": 17, "usage_type": "attribute"}, {"api_name": "main.views", "line_number": 17, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "main.views.displayEditQuestions", "line_number": 18, "usage_type": "attribute"}, {"api_name": "main.views", "line_number": 18, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "main.views.displayUsers", "line_number": 22, "usage_type": "attribute"}, {"api_name": "main.views", "line_number": 22, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "main.views.removeUser", "line_number": 23, "usage_type": "attribute"}, {"api_name": "main.views", "line_number": 23, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "main.views.displayCompany", "line_number": 26, "usage_type": "attribute"}, {"api_name": "main.views", "line_number": 26, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "main.views.resetKey", "line_number": 27, "usage_type": "attribute"}, {"api_name": "main.views", "line_number": 27, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": "main.views.changeName", "line_number": 30, "usage_type": 
"attribute"}, {"api_name": "main.views", "line_number": 30, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "main.views.deactivateCompany", "line_number": 31, "usage_type": "attribute"}, {"api_name": "main.views", "line_number": 31, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 32, "usage_type": "call"}, {"api_name": "main.views.reactivateCompany", "line_number": 32, "usage_type": "attribute"}, {"api_name": "main.views", "line_number": 32, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 35, "usage_type": "call"}, {"api_name": "main.views.logoutUser", "line_number": 35, "usage_type": "attribute"}, {"api_name": "main.views", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "281770033", "text": "'''\nAuthor: jyniki 1067087283@qq.com\nDate: 2022-05-30 16:22:48\nLastEditors: jyniki 1067087283@qq.com\nLastEditTime: 2022-05-30 20:25:52\nFilePath: /new_memae/lib/data/prepocess.py\nDescription: 这是默认设置,请设置`customMade`, 打开koroFileHeader查看配置 进行设置: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE\n'''\nfrom PIL import Image\nimport os\nimport numpy as np\nfrom tqdm import tqdm\nroot = '/data0/JY/xxz/Anomaly/data/利群'\n\nimages_list_0 = []\nimages_list_1 = []\nimages_list_2 = []\nimages_list_3 = []\n\nfor first_path in os.listdir(root):\n if first_path == 'NG':\n first_path = os.path.join(root, first_path)\n for second_path in os.listdir(first_path):\n num_id = second_path\n second_path = os.path.join(first_path, second_path)\n for third_path in os.listdir(second_path):\n third_path = os.path.join(second_path, third_path)\n for image_path in os.listdir(third_path):\n image_path = os.path.join(third_path, image_path)\n \n if num_id == '0':\n images_list_0.append(image_path)\n elif num_id == '1':\n images_list_1.append(image_path)\n elif num_id == '2':\n images_list_2.append(image_path)\n elif num_id == '3':\n images_list_3.append(image_path) \n elif first_path == 'OK':\n first_path = os.path.join(root, first_path)\n for second_path in os.listdir(first_path):\n num_id = second_path\n second_path = os.path.join(first_path, second_path)\n for image_path in os.listdir(second_path):\n image_path = os.path.join(second_path, image_path)\n if num_id == '0':\n images_list_0.append(image_path)\n elif num_id == '1':\n images_list_1.append(image_path)\n elif num_id == '2':\n images_list_2.append(image_path)\n elif num_id == '3':\n images_list_3.append(image_path)\n\ncrop_box_0 = [98,30,765,395]\ncrop_box_1 = [70,38,740,411]\ncrop_box_2 = [98,229,1280,411]\ncrop_box_3 = [210,230,1050,750]\nfor image_path in images_list_0:\n image = Image.open(image_path)\n image_crop = image.crop(crop_box_0)\n image_path = image_path.replace('利群', 'cropped_image')\n image_crop.save(image_path)\n\nfor image_path in images_list_1:\n image = Image.open(image_path)\n image_crop = image.crop(crop_box_1)\n image_path = image_path.replace('利群', 'cropped_image')\n image_crop.save(image_path)\n\nfor image_path in images_list_2:\n image = Image.open(image_path)\n image_crop = image.crop(crop_box_2)\n image_path = image_path.replace('利群', 'cropped_image')\n image_crop.save(image_path)\n\n\nfor image_path in images_list_3:\n image = Image.open(image_path)\n image_crop = image.crop(crop_box_3)\n image_path = image_path.replace('利群', 'cropped_image')\n image_crop.save(image_path)\n", "repo_name": "xiaozhenxu/Federated-Memae", "sub_path": "lib/data/preprocess.py", "file_name": "preprocess.py", "file_ext": "py", 
"file_size_in_byte": 3112, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "33", "api": [{"api_name": "os.listdir", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 60, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 60, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 66, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 66, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 72, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 72, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 79, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 79, "usage_type": "name"}]} +{"seq_id": "13784846522", "text": "\nfrom __future__ import print_function\nimport httplib2\nimport os\n\nfrom apiclient import discovery\nfrom oauth2client import client\nfrom oauth2client import tools\nfrom oauth2client.file import Storage\n\nimport datetime\nimport unicodecsv as csv\n\ntry:\n import argparse\n flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\nexcept ImportError:\n flags = None\n\n# If modifying these scopes, delete your previously saved credentials\n# at ~/.credentials/calendar-python-quickstart.json\nSCOPES = 'https://www.googleapis.com/auth/calendar.readonly'\nCLIENT_SECRET_FILE = 'client_secret.json'\nAPPLICATION_NAME = 'Google Calendar API Python Quickstart'\n\n\ndef get_credentials():\n \"\"\"Gets valid user credentials from storage.\n\n If nothing has been stored, or if the stored credentials are invalid,\n the OAuth2 flow is completed to obtain the new credentials.\n\n Returns:\n Credentials, the obtained credential.\n \"\"\"\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n 
flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials\n\ndef main():\n \"\"\"Exports a CSV file of a GET request to the Google Calendar API.\n\n To authorize this script, follow Step 1 from this page:\n https://developers.google.com/google-apps/calendar/quickstart/python\n\n This code is modified from the sample code in the Quickstart guide.\n\n See eventsResult for the details of the query.\n\n The CSV file contains columns which can be re-imported into Google Calendar per this page:\n https://support.google.com/calendar/answer/37118?hl=en\n\n \"\"\"\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http)\n\n calendar_id = raw_input(\"Enter the calendar id, or 'primary' for your default calendar: \")\n\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n print('Getting the upcoming 1000 events')\n eventsResult = service.events().list(\n calendarId=calendar_id,\n timeMin=now,\n maxResults=1000,\n singleEvents=True,\n showDeleted=True,\n orderBy='updated').execute()\n events = eventsResult.get('items', [])\n\n if not events:\n print('No upcoming events found.')\n\n\n with open('output.csv', 'wb') as csvfile:\n\n headers = [\n \"Subject\",\n \"Start Date\",\n \"End Date\",\n \"All Day Event\",\n \"Description\",\n \"Private\",\n \"NON-IMPORT ROWS ->\",\n \"Updated\",\n \"ID\"]\n\n writer = csv.writer(csvfile, delimiter=',', encoding='utf-8')\n writer.writerow(headers)\n\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n end = event['end'].get('dateTime', event['end'].get('date'))\n\n writer.writerow([\n event['summary'],\n start,\n end,\n \"True\",\n \"Re-imported 10/28/16 - status: \" + event['status'],\n \"False\",\n \"\",\n event['updated'],\n event['id']\n ])\n\n print(\"id: \", event['id'])\n print(\"start: \", start)\n print(\"end: \", end)\n print(\"summary: \", event['summary'])\n print(\"updated: \", event['updated'])\n print(\"status: \", event['status'])\n print(\"-----------\")\n\nif __name__ == '__main__':\n main()\n", "repo_name": "alexnitta/google-calendar-recovery", "sub_path": "quickstart.py", "file_name": "quickstart.py", "file_ext": "py", "file_size_in_byte": 4175, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "33", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 16, "usage_type": "call"}, {"api_name": "oauth2client.tools.argparser", "line_number": 16, "usage_type": "attribute"}, {"api_name": "oauth2client.tools", "line_number": 16, "usage_type": "name"}, {"api_name": "os.path.expanduser", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "oauth2client.file.Storage", 
"line_number": 43, "usage_type": "call"}, {"api_name": "oauth2client.client.flow_from_clientsecrets", "line_number": 46, "usage_type": "call"}, {"api_name": "oauth2client.client", "line_number": 46, "usage_type": "name"}, {"api_name": "oauth2client.tools.run_flow", "line_number": 49, "usage_type": "call"}, {"api_name": "oauth2client.tools", "line_number": 49, "usage_type": "name"}, {"api_name": "oauth2client.tools.run", "line_number": 51, "usage_type": "call"}, {"api_name": "oauth2client.tools", "line_number": 51, "usage_type": "name"}, {"api_name": "httplib2.Http", "line_number": 70, "usage_type": "call"}, {"api_name": "apiclient.discovery.build", "line_number": 71, "usage_type": "call"}, {"api_name": "apiclient.discovery", "line_number": 71, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 75, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 75, "usage_type": "attribute"}, {"api_name": "unicodecsv.writer", "line_number": 103, "usage_type": "call"}]} +{"seq_id": "14558404348", "text": "import requests\nfrom bs4 import BeautifulSoup\n\ndef get_recs(url):\n \n plain_html_text = requests.get(url)\n soup = BeautifulSoup(plain_html_text.content, \"html.parser\")\n soup.prettify()\n content = soup.find_all('strong')\n record = [i.string for i in content if not (i.string).isdigit() ]\n return record\n", "repo_name": "LazyShuya/animePicker", "sub_path": "scraper.py", "file_name": "scraper.py", "file_ext": "py", "file_size_in_byte": 322, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "requests.get", "line_number": 6, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "20008215403", "text": "import openai\nfrom dotenv import load_dotenv\nimport os\nload_dotenv()\n\nopenai_key = os.environ.get('openai')\nopenai.api_key = openai_key\n\n\n#모델 설정하기\nmodel = \"gpt-3.5-turbo\"\n# 질문 작성하기\nquery = input(\"질문을 알려주세요 : \")\n# 메시지 설정하기\nmessages = [\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": query}\n]\n\n# ChatGPT API 호출하기\nresponse = openai.ChatCompletion.create(\n model=model,\n messages=messages\n)\nanswer = response['choices'][0]['message']['content']\nprint(answer)\n", "repo_name": "Shopping-n-joy/Shopping-N-Joy", "sub_path": "test_for_chatgpt.py", "file_name": "test_for_chatgpt.py", "file_ext": "py", "file_size_in_byte": 583, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 4, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 6, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 6, "usage_type": "attribute"}, {"api_name": "openai.api_key", "line_number": 7, "usage_type": "attribute"}, {"api_name": "openai.ChatCompletion.create", "line_number": 21, "usage_type": "call"}, {"api_name": "openai.ChatCompletion", "line_number": 21, "usage_type": "attribute"}]} +{"seq_id": "41059270308", "text": "#Extract and process tSx spotlight data\nimport os, tarfile, sys\nfrom datetime import datetime\nimport numpy as np\nfrom doris.doris_stack.main_code.jobs import Jobs\nfrom doris.doris_stack.main_code.resdata import ResData\nimport warnings\nfrom collections import Counter, OrderedDict\nfrom para_jobs import Para_Jobs\n\ndef export_files(tar_dir):\n tar_files = [k for i,j,k in os.walk(tar_dir)][0]\n for 
tar_file in tar_files:\n tar = tarfile.open(os.path.join(dir, tar_file), \"r:gz\")\n members = tar.getmembers()\n \n #files = tar.getnames()\n #xml_file = [i for i in files if (((i.split('/')[-1][:3]=='TDX') | (i.split('/')[-1][:3]=='TSX')) & (i.split('/')[-1][-4:]=='.xml'))]\n \n #for count, file in enumerate(files):\n for member in tar.getmembers():\n file = member.name\n if len(file.split('/'))==4:\n tsx_filename = file.split('/')[-2]\n if (((tsx_filename[:3]=='TDX') | (tsx_filename[:3]=='TSX')) & (file.split('/')[-1][-4:]!='.xml')):\n #print(file)\n #continue\n date = tsx_filename[28:36]\n print(date)\n date_obj = datetime.strptime(date, '%Y%m%d')\n file_path = '/'.join(file.split('/')[:-1])\n if ((date_obj >= datetime.strptime(start_date, '%Y%m%d')) & (date_obj <= datetime.strptime(end_date, '%Y%m%d'))):\n print(file_path + ' selected. Exporting file ')\n tar.extractall(path = export_path)\n else:\n print(file_path + ' : date out of range')\n\ndef make_folder_stack(export_path, doris_stack_dir):\n folder_list = [j for i,j,k in os.walk(export_path)][0]\n date_list = [folder[28:36] for folder in folder_list]\n stack_folder_list = [os.path.join(doris_stack_dir, date) for date in date_list]\n [os.mkdir(stack_folder) for stack_folder in stack_folder_list if not os.path.exists(stack_folder)]\n \n \ndef dump_slave_data(path, doris_stack_dir, folder_list, date_list, stack_folder_list, xml_file_list):\n \n for folder, xml_file, stack_folder in zip(folder_list, xml_file_list, stack_folder_list):\n xml_file_path = os.path.join(path, folder, xml_file)\n \n #dump header to doris\n os.system('python /home/anurag/Documents/PhDProject/doris/bin/tsx_dump_header2doris.py {} > {}/slave.res'.format(xml_file_path, stack_folder))\n \n cos_file_name = [k for i,j,k in os.walk(os.path.join(path, folder, 'IMAGEDATA'))][0][0]\n #Dump data\n os.system('python2 /home/anurag/Documents/PhDProject/doris/bin/tsx_dump_data.py {} {} -res {}/slave.res'.format(os.path.join(path, folder, 'IMAGEDATA',cos_file_name), os.path.join(stack_folder, 'slave.raw'), stack_folder))\n \n\ndef link_master(doris_stack_dir, date_list, master_date):\n #date_list.remove(master_date)\n #stack_folder_list\n #copy master res file\n master_res_orig = os.path.join(doris_stack_dir, master_date, 'slave.res')\n master_raw_orig = os.path.join(doris_stack_dir, master_date, 'slave.raw')\n \n for date in date_list:\n master_res = os.path.join(doris_stack_dir, date, 'master.res')\n master_raw = os.path.join(doris_stack_dir, date, 'master.raw')\n \n if not os.path.exists(master_res):\n os.system('cp {} {}'.format(master_res_orig, master_res))\n res = ResData(master_res)\n res.processes['crop']['Data_output_file'] = 'master.raw'\n res.write(new_filename = master_res)\n \n if not os.path.exists(master_raw):\n os.symlink(master_raw_orig, master_raw)\n \n \ndef coarse_orbits(date_list, doris_stack_dir, master_date, input_file_dir):\n \n joblist=[]\n for date in date_list:\n os.chdir(os.path.join(doris_stack_dir, date))\n os.system('doris {}/input.coarseorb'.format(input_file_dir))\n\n \ndef fake_fine_corr(date_list, master_date, stack_folder_list):\n \n coreg = OrderedDict()\n coreg['Initial offsets (l,p)'] = '0, 0'\n coreg['Window_size_L_for_correlation'] = '64'\n coreg['Window_size_P_for_correlation'] = '64'\n coreg['Max. 
offset that can be estimated'] = '32'\n coreg['Peak search ovs window (l,p)'] = '16 , 16'\n coreg['Oversampling factor'] = '32'\n coreg['Number_of_correlation_windows'] = '0'\n \n \n for date in date_list:\n \n ifg_res = os.path.join(doris_stack_dir, date, 'ifgs.res')\n res = ResData(ifg_res)\n print(date)\n if not res.process_control['fine_coreg'] == '1':\n res.insert(coreg,'fine_coreg')\n res.write(new_filename = ifg_res)\n\ndef dem_assist(date_list, master_date, input_files_path, para_jobs_obj):\n jobList1 = []\n \n for date in date_list:\n ifg_res = os.path.join(doris_stack_dir, date, 'ifgs.res')\n res = ResData(ifg_res)\n if not res.process_control['dem_assist'] == '1':\n path = os.path.join(doris_stack_dir, date)\n command1 = 'doris '+ os.path.join(input_files_path, 'input.dembased')\n jobList1.append({\"path\": path, \"command\": command1})\n\n \n jobs = Jobs(15, para_jobs_obj)\n jobs.run(jobList1)\n\n\ndef fake_coregpm(doris_stack_dir):\n \n coreg = OrderedDict()\n coreg['Degree_cpm'] = '0'\n coreg['Normalization_Lines'] = ''\n coreg['Normalization_Pixels'] = ''\n coreg['Estimated_coefficientsL'] = ''\n coreg['row_0'] = [\"{0:.8e}\".format(0), '0', '0']\n coreg['Estimated_coefficientsP'] = ''\n coreg['row_1'] = [\"{0:.8e}\".format(0), '0', '0']\n\n coreg['Deltaline_slave00_poly'] = \"{0:.8e}\".format(0)\n coreg['Deltapixel_slave00_poly'] = \"{0:.8e}\".format(0)\n coreg['Deltaline_slave0N_poly'] = \"{0:.8e}\".format(0)\n coreg['Deltapixel_slave0N_poly'] = \"{0:.8e}\".format(0)\n coreg['Deltaline_slaveN0_poly'] = \"{0:.8e}\".format(0)\n coreg['Deltapixel_slaveN0_poly'] = \"{0:.8e}\".format(0)\n coreg['Deltaline_slaveNN_poly'] = \"{0:.8e}\".format(0)\n coreg['Deltapixel_slaveNN_poly'] = \"{0:.8e}\".format(0)\n \n for date in date_list:\n ifg_res_file = os.path.join(doris_stack_dir, date, 'ifgs.res')\n mas_res_file = os.path.join(doris_stack_dir, date, 'master.res')\n \n ifg_res = ResData(ifg_res_file)\n mas_res = ResData(mas_res_file)\n \n lines = (int(mas_res.processes['crop']['Last_line (w.r.t. original_image)']) -\n int(mas_res.processes['crop']['First_line (w.r.t. original_image)']))\n pixels = (int(mas_res.processes['crop']['Last_pixel (w.r.t. original_image)']) -\n int(mas_res.processes['crop']['First_pixel (w.r.t. 
original_image)']))\n\n # Save pixels lines\n coreg['Normalization_Lines'] = \"{0:.8e}\".format(1) + ' ' + \"{0:.8e}\".format(lines)\n coreg['Normalization_Pixels'] = \"{0:.8e}\".format(1) + ' ' + \"{0:.8e}\".format(pixels)\n\n # Copy coregistration from full swath to burst\n if not ifg_res.process_control['comp_coregpm'] == '1':\n ifg_res.insert(coreg,'comp_coregpm')\n ifg_res.write(new_filename = ifg_res_file)\n\n\ndef resample(date_list, master_date, input_file_path, para_jobs_obj):\n jobList1 = []\n \n for date in date_list:\n slv_res = os.path.join(doris_stack_dir, date, 'slave.res')\n res = ResData(slv_res)\n if not res.process_control['resample'] == '1':\n path = os.path.join(doris_stack_dir, date)\n command1 = 'doris '+ os.path.join(input_file_path, 'input.resample')\n jobList1.append({\"path\": path, \"command\": command1})\n\n \n jobs = Jobs(15, para_jobs_obj)\n jobs.run(jobList1) \n \ndef interferogram(date_list, master_date, input_file_path, para_jobs_obj):\n jobList1 = []\n \n for date in date_list:\n ifg_res = os.path.join(doris_stack_dir, date, 'ifgs.res')\n res = ResData(ifg_res)\n if not res.process_control['interfero'] == '1':\n path = os.path.join(doris_stack_dir, date)\n command1 = 'doris '+ os.path.join(input_file_path, 'input.interferogram')\n jobList1.append({\"path\": path, \"command\": command1})\n\n \n jobs = Jobs(15, para_jobs_obj)\n jobs.run(jobList1) \n\n\n \ndef comp_refphase(date_list, master_date, input_file_path, para_jobs_obj):\n jobList1 = []\n \n for date in date_list:\n ifg_res = os.path.join(doris_stack_dir, date, 'ifgs.res')\n res = ResData(ifg_res)\n if not res.process_control['comp_refphase'] == '1':\n path = os.path.join(doris_stack_dir, date)\n command1 = 'doris '+ os.path.join(input_file_path, 'input.comprefpha')\n jobList1.append({\"path\": path, \"command\": command1})\n\n \n jobs = Jobs(15, para_jobs_obj)\n jobs.run(jobList1) \n\n\ndef subt_ref_phase(date_list, master_date, input_file_path, para_jobs_obj):\n jobList1 = []\n \n for date in date_list:\n ifg_res = os.path.join(doris_stack_dir, date, 'ifgs.res')\n res = ResData(ifg_res)\n if not res.process_control['subtr_refphase'] == '1':\n path = os.path.join(doris_stack_dir, date)\n command1 = 'doris '+ os.path.join(input_file_path, 'input.subtrrefpha')\n jobList1.append({\"path\": path, \"command\": command1})\n\n \n jobs = Jobs(15, para_jobs_obj)\n jobs.run(jobList1)\n\ndef comp_refdem(date_list, master_date, input_file_path, para_jobs_obj):\n jobList1 = []\n \n for date in date_list:\n ifg_res = os.path.join(doris_stack_dir, date, 'ifgs.res')\n res = ResData(ifg_res)\n if not res.process_control['comp_refdem'] == '1':\n path = os.path.join(doris_stack_dir, date)\n command1 = 'doris '+ os.path.join(input_file_path, 'input.comprefdem')\n jobList1.append({\"path\": path, \"command\": command1})\n\n jobs = Jobs(15, para_jobs_obj)\n jobs.run(jobList1)\n\ndef return_to_process(date_list, master_date, method):\n \n for date in date_list:\n os.system('/home/anurag/Documents/PhDProject/doris/bin/doris.rmstep.sh '+ method +' '+ os.path.join(doris_stack_dir, date, 'ifgs.res'))\n\ndef make_sink_animation():\n pass\n \nif __name__=='__main__':\n tar_dir = '/home/anurag/Documents/PhDProject/TS-X_wink_data/SLC_data'\n Jobs.id=0\n \n \n start_date = '20151001'\n end_date = '20160401'\n master_date = '20151221'#'20151005'#'20151210'\n\n export_path = '/home/anurag/Documents/PhDProject/TS-X_wink_data/sinkhole_epochs'\n \n #doris_stack_dir = 
'/media/anurag/SSD_1/anurag/PhD_Project/Doris_Processing/Doris_Processing_35_wink_spotlight/stack'\n doris_stack_dir = '/media/anurag/Seagate_badi_vaali/PhDProject/Doris_Processing/Doris_Processing_35_wink_TSx/stack'\n \n input_file_path = os.path.join(os.path.dirname(doris_stack_dir), 'input_files')\n \n folder_list = [j for i,j,k in os.walk(export_path)][0]\n date_list = [folder[28:36] for folder in folder_list]\n stack_folder_list = [os.path.join(doris_stack_dir, date) for date in date_list]\n xml_file_list = [i+'.xml' for i in folder_list]\n \n para_jobs_obj = Para_Jobs(doris_stack_dir)\n \n #date_list.remove(master_date)\n \n print(np.array([int(i) for i in sorted(date_list)]))\n \n print('Making folders')\n make_folder_stack(export_path, doris_stack_dir)\n print('dumping slave data')\n dump_slave_data(export_path, doris_stack_dir, folder_list, date_list, stack_folder_list, xml_file_list)\n print('linking master')\n link_master(doris_stack_dir, date_list, master_date)\n print('Coarse orbits')\n coarse_orbits(date_list, doris_stack_dir, master_date, input_file_path)\n print('Faking fine graduation')\n fake_fine_corr(date_list, master_date, stack_folder_list)\n print('DAC')\n dem_assist(date_list, master_date, input_file_path, para_jobs_obj)\n print('Faking Coreg PM')\n fake_coregpm(doris_stack_dir)\n print('Resampling')\n resample(date_list, master_date, input_file_path, para_jobs_obj)\n print('interferogram')\n interferogram(date_list, master_date, input_file_path, para_jobs_obj)\n print('Comp ref phase')\n comp_refphase(date_list, master_date, input_file_path, para_jobs_obj)\n print('Subt. ref phase')\n subt_ref_phase(date_list, master_date, input_file_path, para_jobs_obj)\n print('Comp ref dem')\n comp_refdem(date_list, master_date, input_file_path, para_jobs_obj)\n return_to_process(date_list, master_date, 'subtr_refphase')\n", "repo_name": "anurag-kulshrestha/Sink_InSAR_CNN_LSTM", "sub_path": "TSx_spotlight.py", "file_name": "TSx_spotlight.py", "file_ext": "py", "file_size_in_byte": 12579, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "33", "api": [{"api_name": "os.walk", "line_number": 12, "usage_type": "call"}, {"api_name": "tarfile.open", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 32, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 32, "usage_type": "name"}, {"api_name": "os.walk", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 51, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 53, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 70, "usage_type": "call"}, {"api_name": "doris.doris_stack.main_code.resdata.ResData", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.symlink", "line_number": 76, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 84, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "attribute"}, {"api_name": "doris.doris_stack.main_code.resdata.ResData", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path", "line_number": 112, "usage_type": "attribute"}, {"api_name": "doris.doris_stack.main_code.resdata.ResData", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path", "line_number": 115, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path", "line_number": 116, "usage_type": "attribute"}, {"api_name": "doris.doris_stack.main_code.jobs.Jobs", "line_number": 120, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 126, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path", "line_number": 145, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 146, "usage_type": "call"}, {"api_name": "os.path", "line_number": 146, "usage_type": "attribute"}, {"api_name": "doris.doris_stack.main_code.resdata.ResData", "line_number": 148, "usage_type": "call"}, {"api_name": "doris.doris_stack.main_code.resdata.ResData", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 170, "usage_type": "call"}, {"api_name": "os.path", "line_number": 170, "usage_type": "attribute"}, {"api_name": "doris.doris_stack.main_code.resdata.ResData", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path", "line_number": 173, "usage_type": 
"attribute"}, {"api_name": "os.path.join", "line_number": 174, "usage_type": "call"}, {"api_name": "os.path", "line_number": 174, "usage_type": "attribute"}, {"api_name": "doris.doris_stack.main_code.jobs.Jobs", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 185, "usage_type": "call"}, {"api_name": "os.path", "line_number": 185, "usage_type": "attribute"}, {"api_name": "doris.doris_stack.main_code.resdata.ResData", "line_number": 186, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 188, "usage_type": "call"}, {"api_name": "os.path", "line_number": 188, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 189, "usage_type": "call"}, {"api_name": "os.path", "line_number": 189, "usage_type": "attribute"}, {"api_name": "doris.doris_stack.main_code.jobs.Jobs", "line_number": 193, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 202, "usage_type": "call"}, {"api_name": "os.path", "line_number": 202, "usage_type": "attribute"}, {"api_name": "doris.doris_stack.main_code.resdata.ResData", "line_number": 203, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 205, "usage_type": "call"}, {"api_name": "os.path", "line_number": 205, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 206, "usage_type": "call"}, {"api_name": "os.path", "line_number": 206, "usage_type": "attribute"}, {"api_name": "doris.doris_stack.main_code.jobs.Jobs", "line_number": 210, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 218, "usage_type": "call"}, {"api_name": "os.path", "line_number": 218, "usage_type": "attribute"}, {"api_name": "doris.doris_stack.main_code.resdata.ResData", "line_number": 219, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 221, "usage_type": "call"}, {"api_name": "os.path", "line_number": 221, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 222, "usage_type": "call"}, {"api_name": "os.path", "line_number": 222, "usage_type": "attribute"}, {"api_name": "doris.doris_stack.main_code.jobs.Jobs", "line_number": 226, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 233, "usage_type": "call"}, {"api_name": "os.path", "line_number": 233, "usage_type": "attribute"}, {"api_name": "doris.doris_stack.main_code.resdata.ResData", "line_number": 234, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 236, "usage_type": "call"}, {"api_name": "os.path", "line_number": 236, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 237, "usage_type": "call"}, {"api_name": "os.path", "line_number": 237, "usage_type": "attribute"}, {"api_name": "doris.doris_stack.main_code.jobs.Jobs", "line_number": 240, "usage_type": "call"}, {"api_name": "os.system", "line_number": 246, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 246, "usage_type": "call"}, {"api_name": "os.path", "line_number": 246, "usage_type": "attribute"}, {"api_name": "doris.doris_stack.main_code.jobs.Jobs.id", "line_number": 253, "usage_type": "attribute"}, {"api_name": "doris.doris_stack.main_code.jobs.Jobs", "line_number": 253, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 265, "usage_type": "call"}, {"api_name": "os.path", "line_number": 265, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 265, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 267, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 269, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 269, "usage_type": "attribute"}, {"api_name": "para_jobs.Para_Jobs", "line_number": 272, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 276, "usage_type": "call"}]} +{"seq_id": "28544125915", "text": "import torch\nimport numpy as np\nimport torch.optim as optim\nimport torch.nn as nn\nfrom sklearn.model_selection import ParameterGrid\nfrom torch.utils.data import DataLoader, SubsetRandomSampler, Subset\nfrom torch.optim.lr_scheduler import StepLR\nimport datasets\nfrom weighted_random_search import wrs\nfrom tqdm import tqdm\nfrom CNNpy import CNN_3_class\n\n\nclass Net_wrapper:\n \"\"\" \n Wrapper for neural network model. It combines the model itself (nn.Module) together with\n optimizer, learning rate scheduler, loss function and other\n training parameters (such as max_epochs, learning rate and batch size). \n It allows to perform training of neural network simply by creating instance of Net_wrapper and \n running `score` method. Compatible with grid search and random search classes.\n \"\"\"\n \n def __init__(self, model=CNN_3_class, criterion=nn.CrossEntropyLoss(), optimizer=optim.Adam, weight_decay = 0,\n max_epochs=5, batch_size=32, learning_rate=0.001, step_size=10, gamma=0.5, **kwargs):\n \"\"\"\n Args:\n model: Neural network model (e. g. torch.nn.Module)\n criterion: loss function\n optimizer: optimizer used for minimizing the loss function\n weight_decay: strength of weight regularization\n max_epochs: number of epochs for training\n batch_size: size of batches feeding the neural network\n learning_rate: learning rate\n step_size: number of epochs, after which the learning rate drops down\n gamma: parameter multiplied by learning rate after 'step_size' number of epochs.\n **kwargs: other parameters of the model. Note: the key of parameter must correspond to exact name of \n attribute in model. For example, if model has attribute 'number_of_filters', the passed parameter should \n have the exact same name, 'number of filters'.\n \"\"\"\n if kwargs:\n self.model_params = kwargs\n else:\n self.model_params = {}\n self.model = model\n self.criterion = criterion\n self.optimizer = optimizer\n self.max_epochs = max_epochs\n self.batch_size = batch_size\n self.learning_rate = learning_rate\n self.scheduler_step_size=step_size\n self.scheduler_gamma=gamma\n self.weight_decay = weight_decay\n \n def __setattr__(self, name, value):\n self.__dict__[name] = value\n\n def score(self, train_dataset, val_dataset, verbose=0):\n \"\"\"\n Method implementing forward-backward propagation loop and computing accuracy on training and validation sets.\n Args:\n train_dataset: PyTorch dataset of training data\n val_dataset: PyTorch dataset of validation data\n verbose: if 1, additional information is printed after each epoch such as training/validation loss/accuracy\n Returns:\n Training accuracy, training loss, validation accuracy and validation loss after full training. 
\n \"\"\"\n if self.model_params:\n model = self.model(**self.model_params)\n \n else:\n model = self.model()\n if self.optimizer==optim.SGD:\n optimizer = self.optimizer(model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay, momentum=0.9)\n else:\n optimizer = self.optimizer(model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)\n scheduler = StepLR(optimizer, step_size=self.scheduler_step_size, gamma=self.scheduler_gamma)\n \n train_loader = DataLoader(train_dataset, batch_size=self.batch_size, shuffle=True)\n val_loader = DataLoader(val_dataset, batch_size=self.batch_size, shuffle=False)\n\n \n # Forward-backward propagation loop\n for epoch in range(self.max_epochs):\n \n if verbose == 1:\n print(f'Epoch {epoch+1}/{self.max_epochs}')\n t_loader = tqdm(train_loader)\n v_loader = tqdm(val_loader)\n else:\n t_loader = train_loader\n v_loader = val_loader\n \n # Train the model\n avg_train_accuracy = 0\n avg_train_loss = 0\n \n for x, y in t_loader:\n optimizer.zero_grad()\n\n output = model(x)\n train_loss = self.criterion(output, y)\n train_loss.backward()\n optimizer.step()\n\n train_accuracy = (output.argmax(dim=1) == y).float().mean()\n\n avg_train_accuracy += train_accuracy.item()\n avg_train_loss += train_loss.item()\n \n # Calculate average training loss and accuracy for the epoch\n avg_train_accuracy = avg_train_accuracy / len(train_loader)\n avg_train_loss = avg_train_loss / len(train_loader)\n \n if verbose == 1:\n print(f'train_acc:{avg_train_accuracy}, train_loss: {avg_train_loss}')\n\n # Test the model\n avg_val_loss = 0\n avg_val_accuracy = 0\n \n with torch.no_grad():\n for x, y in v_loader:\n output = model(x)\n val_loss = self.criterion(output, y)\n val_accuracy = (output.argmax(dim=1) == y).float().mean()\n avg_val_loss += val_loss.item()\n avg_val_accuracy += val_accuracy.item()\n\n # Calculate average test loss and accuracy for the epoch\n avg_val_accuracy = avg_val_accuracy / len(val_loader)\n avg_val_loss = avg_val_loss / len(val_loader)\n\n if verbose == 1:\n print(f'val_acc:{avg_val_accuracy}, val_loss: {avg_val_loss}')\n\n scheduler.step()\n \n self.model = model\n \n return avg_train_accuracy, avg_train_loss, avg_val_accuracy, avg_val_loss\n\n\nclass RandomSearch:\n \"\"\"\n Class implementing classic random search over the space of parameters.\n \"\"\"\n def __init__(self, net: Net_wrapper, param_grid, verbose=1):\n \"\"\"\n Args:\n net: Net_wrapper instance\n param_grid: dictionary of parameters that we search for. 
\n verbose: if 1, print additional information after each trial, \n such as set of parameters that was checked and model accuracy on chosen set of parameters.\n \"\"\"\n self.net = net\n self.param_grid = param_grid\n self.scores = []\n self.best_score = 0\n self.best_params = {}\n self.verbose = verbose\n\n \n @staticmethod\n def choose_random__params(parameters, seed=1):\n \"\"\"\n Helper function used for choosing parameter set at random.\n Args:\n parameters: parameter dictionary.\n seed: seed of random state.\n \"\"\"\n random_params = {}\n rnd = np.random.RandomState(seed)\n \n for param in parameters:\n\n if isinstance(parameters[param][0], float):\n random_params[param] = rnd.uniform(low=parameters[param][0], high=parameters[param][1])\n elif isinstance(parameters[param][0], int):\n random_params[param] = rnd.randint(low=parameters[param][0], high=parameters[param][1])\n else:\n random_params[param] = parameters[param][rnd.randint(0, len(parameters[param]))]\n \n return random_params\n\n \n def fit(self, train_dataset, val_dataset, n_trials = 10):\n \"\"\"\n Fit the random search with train and validation dataset. \n Search for optimal parameters for neural network declared during \n initialization of RandomSearch instance.\n Args:\n train_dataset: PyTorch Dataset of training data\n cal_dataset: PyTprch Dataset of validation data\n n_trials: number of trials performed during random search.\n \n Returns:\n self\n \"\"\"\n \n for trial in range(n_trials):\n random_params = RandomSearch.choose_random__params(parameters=self.param_grid, seed=trial)\n for hyp_name, hyp_val in random_params.items():\n if hasattr(self.net, hyp_name):\n setattr(self.net, hyp_name, hyp_val)\n else:\n self.net.model_params[hyp_name] = hyp_val\n\n _, _, val_accuracy, _ = self.net.score(train_dataset, val_dataset)\n self.scores.append(val_accuracy)\n if val_accuracy > self.best_score:\n self.best_score = val_accuracy\n self.best_params = random_params\n if self.verbose == 1:\n print('Parameter set:', random_params)\n print(f'val_accuracy: {val_accuracy:.4f}')\n\n return self\n\n\nclass GridSearch:\n \"\"\"\n Class used to perform grid search on set of parameters. \n \"\"\"\n def __init__(self, net: Net_wrapper, param_grid, step_by_step=False, verbose=1):\n \"\"\"\n Args:\n net: Net_wrapper instance\n param_grid: dictionary of parameters\n step_by_step: if False normal grid search is performed, if True each parameter is evaluated step by step \n (not all the parameters together)\n verbose: if 1, additional information (parameter set and accuracy) prints with each iteration of grid search. \n \"\"\"\n self.net = net\n self.param_grid = param_grid\n self.scores = []\n self.best_score = 0\n self.best_params = {}\n self.step_by_step = step_by_step\n self.verbose = verbose\n\n\n def fit(self, train_dataset, val_dataset):\n \"\"\"\n Fit the grid search with train and validation dataset. 
\n Search for optimal parameters for neural network declared during \n initialization of GridSearch instance.\n Args:\n train_dataset: PyTorch instance of train data\n val_dataset: PyTorch instance of validation data\n \"\"\"\n if self.step_by_step==False:\n for params in ParameterGrid(self.param_grid):\n for hyp_name, hyp_val in params.items():\n if hasattr(self.net, hyp_name):\n setattr(self.net, hyp_name, hyp_val)\n else:\n self.net.model_params[hyp_name] = hyp_val\n\n _, _, val_accuracy, _ = self.net.score(train_dataset, val_dataset)\n self.scores.append(val_accuracy)\n if val_accuracy > self.best_score:\n self.best_score = val_accuracy\n self.best_params = params\n if self.verbose == 1:\n print('Parameter set:', params)\n print(f'val_accuracy: {val_accuracy:.4f}')\n else:\n \n for hyp_name, hyp_vals in self.param_grid.items():\n score = 0\n for hyp_val in hyp_vals:\n if hasattr(self.net, hyp_name):\n setattr(self.net, hyp_name, hyp_val)\n else:\n self.net.model_params[hyp_name] = hyp_val \n\n _, _, val_accuracy, _ = self.net.score(train_dataset, val_dataset)\n self.scores.append(val_accuracy)\n \n if val_accuracy > score:\n if score > self.best_score:\n self.best_score = score\n score = val_accuracy\n self.best_params[hyp_name] = hyp_val\n \n if self.verbose == 1:\n print(f'Current parameter: {hyp_name}:', hyp_val, f' val_accuracy: {val_accuracy:.4f}')\n print(f'Best parameters till now:{self.best_params}')\n \n if hasattr(self.net, hyp_name):\n setattr(self.net, hyp_name, self.best_params[hyp_name])\n print(getattr(self.net, hyp_name))\n else:\n self.net.model_params[hyp_name] = self.best_params[hyp_name]\n\n return self\n \n\nclass WeightedRandomSearch():\n \"\"\"\n Class used to perform weighted random search on neural networks.\n Not completed - unable to use fANOVA package. \n\n Attributes:\n self.net - Net_wrapper instance\n self.param_grid - dictionary of parameters we want to search\n self.scores - list for scores of each set of parameters\n self.best_score - best score out of all parameters\n self.best_params - best set of parameters\n self.verbose - if set to 1 additional information (parameter set and accuracy) prints with each iteration of grid search. \n \"\"\"\n def __init__(self, net, param_grid, verbose=1):\n \"\"\"\n Args:\n net - Net_wrapper instance\n param_grid - dictionary of parameters we want to search\n verbose - if set to 1 additional information (parameter set and accuracy) prints with each iteration of grid search. \n \"\"\"\n self.net = net\n self.param_grid = param_grid\n self.scores = []\n self.best_score = 0\n self.best_params = None\n self.verbose = verbose\n\n def fit(self, train_dataset, val_dataset, N, N_0):\n \"\"\"\n Fit the grid search with train and validation dataset. 
\n Search for optimal parameters for neural network declared during \n initialization of GridSearch instance.\n \"\"\"\n def goal_function(params):\n \n for hyp_name, hyp_val in params.items():\n if hasattr(self.net, hyp_name):\n setattr(self.net, hyp_name, hyp_val)\n else:\n self.net.model_params[hyp_name] = hyp_val\n \n return self.net.score(train_dataset, val_dataset)[1]\n \n self.best_params, self.best_score = wrs(F=goal_function, N=N, N_0=N_0, param_grid=self.param_grid )\n \n return self\n \n# GridSearch and RandomSearch tests\n\nif __name__==\"__main__\":\n \n train_dataset = datasets.cifar_train\n val_dataset = datasets.cifar_val\n\n subset_indices = list(range(500))\n subset_sampler = SubsetRandomSampler(subset_indices)\n\n subset_train_dataset = Subset(train_dataset, subset_indices)\n subset_val_dataset = Subset(val_dataset, subset_indices)\n\n test_hyper_params = {'learning_rate': [0.0001, 0.0005, 0.001, 0.005, 0.01], 'batch_size': [8, 16, 32, 64, 128]}\n my_net = Net_wrapper(model=CNN_3_class, max_epochs=5)\n \n gs = GridSearch(net=my_net, param_grid=test_hyper_params, step_by_step=True, verbose=1)\n gs = gs.fit(subset_train_dataset, subset_val_dataset)\n\n print(gs.best_score)\n print(gs.best_params)\n\n rs = RandomSearch(my_net, test_hyper_params, verbose=1)\n rs.fit(subset_train_dataset, subset_val_dataset, n_trials = 5)\n\n print(rs.best_score)\n print(rs.best_params)\n", "repo_name": "mikolajzalewski/Deep_Learning", "sub_path": "CNN/hyperparameter_search.py", "file_name": "hyperparameter_search.py", "file_ext": "py", "file_size_in_byte": 14755, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "CNNpy.CNN_3_class", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 23, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.optim.SGD", "line_number": 72, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 72, "usage_type": "name"}, {"api_name": "torch.optim.lr_scheduler.StepLR", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 79, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 87, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.random.RandomState", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 172, "usage_type": "attribute"}, {"api_name": "sklearn.model_selection.ParameterGrid", "line_number": 252, "usage_type": "call"}, {"api_name": "weighted_random_search.wrs", "line_number": 342, "usage_type": "call"}, {"api_name": "datasets.cifar_train", "line_number": 350, "usage_type": "attribute"}, {"api_name": "datasets.cifar_val", "line_number": 351, "usage_type": "attribute"}, {"api_name": "torch.utils.data.SubsetRandomSampler", "line_number": 354, "usage_type": "call"}, {"api_name": "torch.utils.data.Subset", "line_number": 356, "usage_type": "call"}, {"api_name": "torch.utils.data.Subset", "line_number": 357, "usage_type": "call"}, {"api_name": "CNNpy.CNN_3_class", "line_number": 360, "usage_type": "name"}]} 
+{"seq_id": "27743985597", "text": "# Imports\nimport sys\nfrom stock_list import codes\nfrom datetime import datetime\nimport json\nimport pymongo\nimport pandas as pd\nfrom logger import error_logger, info_logger, warning_logger\n\n# database Connection\n# connection_url = 'mongodb+srv://admin:admin@cluster0.70gug.mongodb.net/exercise-tracker?retryWrites=true&w=majority'\nconnection_url = 'mongodb://localhost:27017/'\nclient = pymongo.MongoClient(connection_url)\ndatabase = client.get_database('stock-analyzer')\nstocks_table = database.stocks\n\n# Constants\nCOMM_RATE = 1.19\nSTART_DATE = 1451595600\n\n# Global\nall_stats = []\ncapital = 100000\n\n\ndef first_breakout(stocks, cur_pos):\n i = 0\n entry_point = stocks[cur_pos]['close']\n risk = compute_profit(entry_point, stocks[cur_pos]['alma'])\n\n while True:\n if i is not 0 and cur_pos is not 0:\n prev_candle = stocks[cur_pos-i]\n if close_below_alma(prev_candle):\n break\n if is_above(prev_candle['close'], prev_candle['alma']) and low_below_alma(prev_candle):\n risk = compute_profit(entry_point, prev_candle['alma'])\n i += 1\n\n return risk\n\n\ndef calculate_win_rate(code, txns):\n if len(txns) == 0:\n return {\n \"code\": code,\n \"win_rate\": 0,\n \"wins\": 0,\n \"avg_win\": 0,\n \"max_win\": 0,\n \"loss\": 0,\n \"avg_loss\": 0,\n \"max_loss\": 0,\n \"total_trade\": 0,\n \"total\": 0,\n \"total_capital\": 0\n }\n\n avg_win = 0\n avg_loss = 0\n df = pd.DataFrame(txns)\n has_open_position = False\n loss = 0\n total = 0\n valid_txns = len(txns)\n win_rate = 0\n wins = 0\n winning_trade = 0\n\n # For scenario where:\n # there is only one transaction and\n # it is still open\n try:\n max_loss = df['pnl'].min() < 0 and df['pnl'].min() or 0\n max_win = df['pnl'].max() > 0 and df['pnl'].max() or 0\n except:\n max_loss = 0\n max_win = 0\n\n for txn in txns:\n try:\n pnl = txn['pnl']\n total += pnl\n if pnl > 0:\n winning_trade += 1\n wins += pnl\n else:\n loss += pnl\n except:\n has_open_position = True\n info_logger('Open position')\n\n valid_txns = has_open_position and (\n valid_txns - 1) or valid_txns\n\n if winning_trade is not 0:\n win_rate = round(winning_trade/valid_txns * 100, 2)\n avg_win = wins != 0 and round(wins/winning_trade, 2) or 0\n avg_loss = loss != 0 and round(loss/(valid_txns-winning_trade), 2) or 0\n\n return {\n \"code\": code,\n \"win_rate\": win_rate,\n \"wins\": winning_trade,\n \"avg_win\": avg_win,\n \"max_win\": max_win,\n \"loss\": valid_txns - winning_trade,\n \"avg_loss\": avg_loss,\n \"max_loss\": max_loss,\n \"total_trade\": valid_txns,\n \"total\": round(total, 2),\n \"total_capital\": round(capital, 2)}\n\n\ndef candle_above(stock, indicator):\n return stock['open'] > indicator and stock['close'] > indicator\n\n\ndef candle_below(stock, indicator):\n return stock['open'] < indicator and stock['close'] < indicator\n\n\ndef close_below_alma(stock):\n \"\"\"\n Identify if close price is below ALMA.\n :param stock: Stock object\n :return: boolean\n \"\"\"\n if stock['alma'] is not None:\n return stock['close'] < stock['alma']\n\n\ndef compute_profit(buy_price, sell_price):\n return (((sell_price - buy_price) / buy_price) * 100)\n\n\ndef compute_pnl(txn, txns):\n return round(compute_profit(txns[-1:][0]['buy_price'], txn['sell_price']) - COMM_RATE, 2)\n\n\ndef convert_timestamp(timestamp):\n return datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d')\n\n\ndef display_stats(strategy, stats):\n print('\\n{} strategy'.format(strategy))\n print('Win rate: {}% \\nTotal Trade: {} \\nWins: {} \\nAverage Win: 
{}%\\nMax Win: {}%\\nLoss: {} \\nAverage Loss: {}%\\nMax Loss: {}%\\nTotal: {}%\\nTotal Capital: {}'\n .format(\n stats['win_rate'],\n stats['total_trade'],\n stats['wins'],\n stats['avg_win'],\n stats['max_win'],\n stats['loss'],\n stats['avg_loss'],\n stats['max_loss'],\n stats['total'],\n stats['total_capital']))\n\n\ndef fetch_all_stocks():\n \"\"\"\n Fetch all stocks.\n \"\"\"\n return stocks_table.find()\n\n\ndef fetch_stocks(code):\n \"\"\"\n Fetch stocks based on stock code.\n \"\"\"\n return list(stocks_table.find({\"code\": code}))\n\n\ndef get_filename():\n return input('Enter filename: ')\n\n\ndef get_previous_values(stocks, cur_pos, length):\n prev_values = []\n\n for i in range(length+1):\n if i is not 0:\n if cur_pos > length+1:\n prev_values.append(stocks[cur_pos-i]['value'])\n else:\n prev_values.append(0)\n\n return prev_values\n\n\ndef get_stats(code, txns):\n all_stats.append(calculate_win_rate(code, txns))\n\n\ndef is_above(above, below):\n return above > below\n\n\ndef is_below(below, above):\n return below < above\n\n\ndef is_green_candle(stock):\n return stock['close'] > stock['open']\n\n\ndef low_below_alma(stock):\n if stock['alma'] is not None:\n return stock['low'] < stock['alma']\n\n\ndef main():\n action = ''\n configs = ''\n txns = []\n filename = ''\n setup = []\n strat_name = ''\n stocks = []\n\n action = input(\n '[1-Backtest] [2-Trade Analysis]: ')\n strat = input(\n '[1 - MAMA] [2 - DOUBLE CROSS]: ')\n code = input('Stock symbol: ')\n save = save_file()\n\n code = code == '' and codes or code.upper()\n stocks = code == 'ALL' and codes or [code]\n\n if save:\n filename = get_filename()\n\n if strat == '1':\n strat_name = 'MAMA'\n if action == '2':\n configs = ['mama.analyze.config.json']\n else:\n configs = ['mama.config.json']\n elif strat == '2':\n strat_name = 'DOUBLE CROSS'\n configs = ['double_cross.config.json']\n elif strat.upper() == 'ALL':\n strat_name = 'ALL'\n configs = ['mama.config.json', 'double_cross.config.json']\n else:\n print('No strategy like that yet')\n\n for config in configs:\n with open('{}'.format(config)) as file:\n setup.append(json.loads(file.read()))\n\n info_logger('Starting {} test'.format(strat_name))\n txns = analyzer(setup, stocks, action)\n info_logger('End {} test'.format(strat_name))\n\n if code == 'ALL':\n get_stats(code, txns)\n\n display_report(strat_name, code, txns, save, filename)\n show_parameters('{}'.format(strat_name), code, save, filename)\n\n\ndef display_report(name, code, txns, save, filename):\n if len(txns) != 0:\n stats = calculate_win_rate(code != 'ALL' and code or 'ALL', txns)\n txnsdf = pd.DataFrame(txns)\n txnsdf.sort_values(['code', 'buy_date'], ascending=True,\n inplace=True, na_position='last')\n txnsdf['pnl'] = txnsdf['pnl'].astype(str) + '%'\n statsdf = pd.DataFrame(all_stats)\n statsdf['win_rate'] = statsdf['win_rate'].astype(str) + '%'\n statsdf['max_win'] = statsdf['max_win'].astype(str) + '%'\n statsdf['avg_win'] = statsdf['avg_win'].astype(str) + '%'\n statsdf['max_loss'] = statsdf['max_loss'].astype(str) + '%'\n statsdf['avg_loss'] = statsdf['avg_loss'].astype(str) + '%'\n statsdf.sort_values(['total'], ascending=True,\n inplace=True, na_position='last')\n statsdf['total'] = statsdf['total'].astype(str) + '%'\n pd.set_option('display.max_rows', 10000)\n\n if save:\n save_report(filename, txnsdf, statsdf)\n\n info_logger(txnsdf)\n info_logger(statsdf)\n display_stats('{}'.format(name), stats)\n\n\ndef save_report(filename, txns, stats):\n with 
pd.ExcelWriter('results/{0}.xlsx'.format(filename)) as writer: # pylint: disable=abstract-class-instantiated\n stats.to_excel(writer, sheet_name='Summary')\n txns.to_excel(writer, sheet_name='Details')\n\n\ndef analyzer(setup, stocks, action):\n buy = []\n risk = []\n sell = []\n stop = 0\n trail_stop = []\n txn = []\n txns = []\n\n for stock_code in stocks:\n info_logger('Starting test of {}'.format(stock_code))\n\n for strat in setup:\n try:\n buy = strat['buy']\n sell = strat['sell']\n risk = strat['risk']\n stop = strat['stop']\n trail_stop = strat['trail_stop']\n except:\n error_logger('Error in configuration file')\n\n if action == '1':\n txn = backtest(stock_code, buy, sell,\n risk, stop, trail_stop)\n else:\n txn = analyze(stock_code, buy, sell,\n risk, stop, trail_stop)\n\n get_stats(stock_code, txn)\n txns = txns + txn\n\n info_logger('End of test of {}'.format(stock_code))\n\n return txns\n\n\ndef valid_previous_values(prev_values, target_prev_value):\n invalid_ctr = 0\n valid = True\n\n for value in prev_values:\n if not is_above(value, target_prev_value):\n invalid_ctr += 1\n if invalid_ctr > 1:\n valid = False\n\n return valid\n\n\ndef backtest(code, buy_criteria, sell_criteria, risk_criteria, stoploss, trail_stop):\n stock_data = fetch_stocks(code)\n buy = True\n i = 0\n\n txns = []\n trade_capital = 0\n global capital\n\n for candle in stock_data:\n action = buy and 'BUY' or 'SELL'\n prev_candle = stock_data[i-1]\n\n if (prev_candle['alma'] is not None\n and prev_candle['macd'] is not None\n and prev_candle['ma20'] is not None\n and prev_candle['volume20'] is not None):\n\n if candle['timestamp'] >= START_DATE:\n\n # Variables for eval\n # pylint: disable=unused-variable\n alma = candle['alma']\n close = candle['close']\n ma20 = candle['ma20']\n macd = candle['macd']\n macds = candle['macds']\n prev_alma = prev_candle['alma']\n prev_close = prev_candle['close']\n prev_macd = prev_candle['macd']\n prev_macds = prev_candle['macds']\n prev_ma20 = prev_candle['ma20']\n prev_values = get_previous_values(stock_data, i, 5)\n value = candle['value']\n volume = candle['volume']\n volume20 = candle['volume20']\n\n # BUYING STOCK\n if buy:\n valid = False\n for condition in buy_criteria:\n valid = True\n valid = eval(condition)\n if not valid:\n break\n\n if valid:\n for condition in risk_criteria:\n valid = eval(condition)\n if not valid:\n break\n\n if valid:\n trade_capital = capital * 0.08\n txn = trade(candle, action, trade_capital)\n txn['candle'] = is_green_candle(\n candle) and 'Green' or 'Red'\n txn['above_ma20'] = is_above(\n close, ma20) and 'Yes' or 'No'\n txns.append(txn)\n capital += -(trade_capital)\n buy = not buy\n else:\n exit_criteria = ''\n paperloss = compute_profit(\n txns[len(txns)-1]['buy_price'], close)\n stop = compute_profit(prev_close, close)\n valid = False\n\n if int(stoploss) != 0 and int(stoploss) >= paperloss:\n buy_price = txns[len(txns)-1]['buy_price']\n cut_price = round(buy_price -\n (buy_price * abs(int(stoploss))/100), 2)\n\n if cut_price >= candle['low'] and cut_price <= candle['high']:\n exit_criteria = 'STOPLOSS'\n candle['close'] = cut_price\n valid = True\n\n elif len(trail_stop) != 0:\n for condition in trail_stop:\n exit_criteria = 'TRAIL STOP'\n valid = True\n valid = eval(condition)\n if not valid:\n break\n\n if not valid:\n for condition in sell_criteria:\n exit_criteria = 'NORMAL EXIT'\n valid = True\n valid = eval(condition)\n if not valid:\n break\n\n if valid:\n txn = trade(candle, action)\n pnl = compute_pnl(txn, txns)\n amount = 
round(trade_capital * (pnl/100), 2)\n txns[len(txns)-1]['sell_date'] = txn['sell_date']\n txns[len(txns)-1]['sell_price'] = txn['sell_price']\n txns[len(txns)-1]['pnl'] = pnl\n txns[len(txns)-1]['exit'] = exit_criteria\n txns[len(txns)-1]['amount'] = amount\n capital += trade_capital + amount\n buy = not buy\n\n i += 1\n\n return txns\n\n\ndef analyze(code, buy_criteria, sell_criteria, risk_criteria, stoploss, trail_stop):\n stock_data = fetch_stocks(code)\n buy = True\n buy_date = ''\n i = 0\n\n txns = []\n trade_capital = 0\n global capital\n\n for candle in stock_data:\n action = 'BUY'\n prev_candle = stock_data[i-1]\n j = 0\n\n if (prev_candle['alma'] is not None\n and prev_candle['macd'] is not None\n and prev_candle['ma20'] is not None\n and prev_candle['volume20'] is not None):\n\n if candle['timestamp'] >= START_DATE:\n\n # Variables for eval\n # pylint: disable=unused-variable\n alma = candle['alma']\n close = candle['close']\n ma20 = candle['ma20']\n macd = candle['macd']\n macds = candle['macds']\n prev_alma = prev_candle['alma']\n prev_close = prev_candle['close']\n prev_macd = prev_candle['macd']\n prev_macds = prev_candle['macds']\n prev_ma20 = prev_candle['ma20']\n prev_values = get_previous_values(stock_data, i, 5)\n value = candle['value']\n volume = candle['volume']\n volume20 = candle['volume20']\n\n # BUYING STOCK\n if buy:\n valid = len(buy_criteria) == 0 and True or False\n\n # Check buy criteria\n for condition in buy_criteria:\n valid = eval(condition)\n if not valid:\n break\n\n # Check risk condition\n if valid:\n for condition in risk_criteria:\n valid = eval(condition)\n if not valid:\n break\n\n if valid:\n trade_capital = capital * 0.08\n txn = trade(candle, action, trade_capital)\n txn['candle'] = is_green_candle(\n candle) and 'Green' or 'Red'\n txn['above_ma20'] = is_above(\n close, ma20) and 'Yes' or 'No'\n txns.append(txn)\n capital += -(trade_capital)\n buy = not buy\n buy_date = candle['timestamp']\n else:\n for sell_candle in stock_data:\n action = 'SELL'\n sell_prev_candle = stock_data[j-1]\n\n # Variables for eval\n # pylint: disable=unused-variable\n alma = sell_candle['alma']\n close = sell_candle['close']\n ma20 = sell_candle['ma20']\n macd = sell_candle['macd']\n macds = sell_candle['macds']\n prev_alma = sell_prev_candle['alma']\n prev_close = sell_prev_candle['close']\n prev_macd = sell_prev_candle['macd']\n prev_macds = sell_prev_candle['macds']\n prev_ma20 = sell_prev_candle['ma20']\n prev_values = get_previous_values(stock_data, j, 5)\n value = sell_candle['value']\n volume = sell_candle['volume']\n volume20 = sell_candle['volume20']\n\n if sell_candle['timestamp'] > buy_date:\n exit_criteria = ''\n paperloss = compute_profit(\n txns[len(txns)-1]['buy_price'], close)\n stop = compute_profit(prev_close, close)\n valid = False\n\n if int(stoploss) != 0 and int(stoploss) >= paperloss:\n buy_price = txns[len(txns)-1]['buy_price']\n cut_price = round(buy_price -\n (buy_price * abs(int(stoploss))/100), 2)\n\n if cut_price >= candle['low'] and cut_price <= candle['high']:\n exit_criteria = 'STOPLOSS'\n candle['close'] = cut_price\n valid = True\n\n elif len(trail_stop) != 0:\n for condition in trail_stop:\n exit_criteria = 'TRAIL STOP'\n valid = True\n valid = eval(condition)\n if not valid:\n break\n\n if not valid:\n for condition in sell_criteria:\n exit_criteria = 'NORMAL EXIT'\n valid = True\n valid = eval(condition)\n if not valid:\n break\n\n if valid:\n txn = trade(sell_candle, action)\n pnl = compute_pnl(txn, txns)\n amount = 
round(trade_capital * (pnl/100), 2)\n txns[len(txns)-1]['sell_date'] = txn['sell_date']\n txns[len(txns) -\n 1]['sell_price'] = txn['sell_price']\n txns[len(txns)-1]['pnl'] = pnl\n txns[len(txns)-1]['exit'] = exit_criteria\n txns[len(txns)-1]['amount'] = amount\n capital += trade_capital + amount\n buy = not buy\n break\n\n j += 1\n\n i += 1\n\n return txns\n\n\ndef previous_breakout_candle(stock, indicator):\n return stock['open'] <= indicator and stock['close'] >= indicator\n\n\ndef save_file():\n save = input('Save result? [Y/N]: ')\n save = save.upper()\n return save == 'Y' and True or False\n\n\ndef show_parameters(strategy, stock, save_file, filename=''):\n print('\\nPARAMETERS USED IN THIS TEST \\nStrategy: {0} \\nStock: {1} \\nSave File: {2} \\nFilename: {3}'\n .format(\n strategy, stock, save_file, filename\n ))\n\n\ndef stock_to_test():\n code = input('Enter stock to test: ')\n return code != '' and code.upper() or ''\n\n\ndef trade(stock, action, trade_capital=0):\n txn = {}\n\n if action == 'BUY':\n txn = {\"code\": stock['code'], \"buy_date\": convert_timestamp(stock['timestamp']),\n \"buy_price\": stock['close'], \"bought_shares\": int(trade_capital/stock['close'])}\n else:\n txn = {\"code\": stock['code'], \"sell_date\": convert_timestamp(stock['timestamp']),\n \"sell_price\": stock['close']}\n\n return txn\n\n\nmain()\n", "repo_name": "carloadamos/stock-analyzer", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 20982, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "pymongo.MongoClient", "line_number": 13, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 61, "usage_type": "call"}, {"api_name": "logger.info_logger", "line_number": 91, "usage_type": "call"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 142, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 142, "usage_type": "name"}, {"api_name": "stock_list.codes", "line_number": 229, "usage_type": "name"}, {"api_name": "stock_list.codes", "line_number": 230, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 252, "usage_type": "call"}, {"api_name": "logger.info_logger", "line_number": 254, "usage_type": "call"}, {"api_name": "logger.info_logger", "line_number": 256, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 268, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 272, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 281, "usage_type": "call"}, {"api_name": "logger.info_logger", "line_number": 286, "usage_type": "call"}, {"api_name": "logger.info_logger", "line_number": 287, "usage_type": "call"}, {"api_name": "pandas.ExcelWriter", "line_number": 292, "usage_type": "call"}, {"api_name": "logger.info_logger", "line_number": 307, "usage_type": "call"}, {"api_name": "logger.error_logger", "line_number": 317, "usage_type": "call"}, {"api_name": "logger.info_logger", "line_number": 329, "usage_type": "call"}]} +{"seq_id": "73744261533", "text": "import json\n\nimport plotly.express as px\nimport plotly.graph_objects as go\nimport streamlit as st\n\n\ndef save_as_default_button(default, default_file_path):\n if st.button(\"Save as Default\", key=default_file_path):\n with open(default_file_path, \"w\") as f:\n json.dump(default, f)\n\n\ndef create_multiselect_box(df, value_col, label_col, default_file_path):\n # Get the default value from session state or set an 
empty list as the default\n try:\n with open(default_file_path, \"r\") as f:\n default_value = json.load(f)\n except FileNotFoundError:\n default_value = []\n\n # Filter out the values with zero amount, so we only have the real expenses\n values = df.loc[df[value_col] != 0, label_col].unique()\n values = sorted(values)\n\n # Create a multiselect box with the filtered options and default value\n\n try:\n selected = st.multiselect(\n f\"Select {value_col} to drop\",\n values,\n default=st.session_state.get(f\"{value_col}_to_drop\", default_value),\n )\n\n save_as_default_button(selected, default_file_path)\n\n return selected\n\n except st.errors.StreamlitAPIException:\n st.error(\n f\"\"\" You can not use the defaults of another bank account. \n Please delete the file {default_file_path}\"\"\"\n )\n return []\n\n\ndef colorize(value, is_expenses=False):\n if value > 0:\n if not is_expenses:\n return f'{value:,.2f}'\n else:\n return f'{value:,.2f}'\n elif value < 0:\n if not is_expenses:\n return f'{value:,.2f}'\n else:\n return f'{value:,.2f}'\n else:\n return f'{value:,.2f}'\n\n\ndef plot_bar(\n data, xdata, ydata, title, xaxis_title, yaxis_title, color=\"green\", **kwargs\n):\n dtick = data[ydata[0]].max() / 10 if type(ydata) == list else data[ydata].max() / 10\n axis_title_font_size = 18\n axis_tickfont_size = 15\n\n real_labels = kwargs.get(\"data_labels\", None)\n\n fig = px.bar(\n data,\n x=xdata,\n y=ydata,\n title=title,\n height=500,\n color=kwargs.get(\"color_setup\", None),\n color_discrete_sequence=color,\n barmode=\"overlay\",\n opacity=0.9,\n )\n\n if real_labels:\n labels_dict = {k: v for k, v in zip(data[ydata], real_labels)}\n fig.for_each_trace(lambda t: t.update(name=labels_dict[t.name]))\n\n fig.update_layout(\n title_font_size=20,\n xaxis_title=xaxis_title,\n xaxis_title_font_size=axis_title_font_size,\n xaxis_tickfont_size=axis_tickfont_size,\n yaxis_title=yaxis_title,\n yaxis_tickfont_size=axis_tickfont_size,\n yaxis_title_font_size=axis_title_font_size,\n xaxis=dict(showgrid=True),\n yaxis=dict(showgrid=True, dtick=dtick),\n )\n\n for i, ytrend in enumerate(kwargs.get(\"ytrend\", [])):\n fig.add_trace(\n go.Scatter(\n x=data[xdata] if kwargs.get(\"add_trace\", True) else None,\n y=ytrend,\n mode=\"lines\",\n line=dict(color=kwargs.get(\"trendcolor\", None)[i], width=3),\n name=kwargs.get(\"trend_labels\", None)[i],\n )\n )\n\n currency = st.session_state[\"currency\"]\n # Add hoover information to the plot\n fig.update_traces(\n # hovertemplate=\"
Date: %{x|%b %Y}<br>Amount: %{y:,.2f} {currency}\",\n        hovertemplate=f\"Date: %{{x|%b %Y}}<br>
Amount: %{{y:.2f}} {currency} \",\n )\n\n fig.update_layout(\n showlegend=kwargs.get(\"showlegend\", False),\n legend=dict(\n x=0.01,\n y=1,\n font=dict(size=14),\n bgcolor=\"rgba(0,0,0,0)\",\n ),\n # hovermode=\"x unified\",\n )\n\n return fig\n", "repo_name": "AndBerna/finance_evaluator", "sub_path": "utils/streamlit_utils.py", "file_name": "streamlit_utils.py", "file_ext": "py", "file_size_in_byte": 3961, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "streamlit.button", "line_number": 9, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 11, "usage_type": "call"}, {"api_name": "json.load", "line_number": 18, "usage_type": "call"}, {"api_name": "streamlit.multiselect", "line_number": 29, "usage_type": "call"}, {"api_name": "streamlit.session_state.get", "line_number": 32, "usage_type": "call"}, {"api_name": "streamlit.session_state", "line_number": 32, "usage_type": "attribute"}, {"api_name": "streamlit.errors", "line_number": 39, "usage_type": "attribute"}, {"api_name": "streamlit.error", "line_number": 40, "usage_type": "call"}, {"api_name": "plotly.express.bar", "line_number": 71, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 71, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 101, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 101, "usage_type": "name"}, {"api_name": "streamlit.session_state", "line_number": 110, "usage_type": "attribute"}]} +{"seq_id": "8801320237", "text": "#!/usr/bin/env python3\n# from GitC.git import Git\n# import GitC.git as git\nimport json\nimport time\nfrom git import main as initialize\n\ndef main():\n init = initialize()\n repo = init.Repository(init)\n check = repo.create()\n if check:\n print(\"Success\")\n else:\n print(\"Failure, repository is already made; otherwise, check json output\")\n return True\n\nif __name__ == '__main__':\n main()\n\n", "repo_name": "Codog808/Projects", "sub_path": "computers/config/github/bin/gcreate.py", "file_name": "gcreate.py", "file_ext": "py", "file_size_in_byte": 418, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "git.main", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "6433802908", "text": "import json, requests\n\n\napi_key = 'Your API Key'\n \n\nbase_url = \"http://api.openweathermap.org/data/2.5/weather?\"\n \n# Give city name \ncity_name = input(\"Enter city name : \") \n \n\ncomplete_url = base_url + \"appid=\" + api_key + \"&q=\" + city_name \n\nresponse = requests.get(complete_url) \n \n# json method of response object \n# convert json format data into \n# python format data \nresponse.raise_for_status()\nx = json.loads(response.text) \ny = x[\"main\"] \n#change Kelvin to Celcius\ncurrent_temperature = y[\"temp\"] - 273.15\ncurrent_pressure = y[\"pressure\"] \ncurrent_humidiy = y[\"humidity\"] \n# store the value of \"weather\" in z\n# z is now list with 0th item only. 
We need to extract 0th item and store in p\n#p is now a new dict, to get the value of descirption we store it in s\nz = x[\"weather\"] \np = z[0]\ns = p['description']\n# print following values \nprint(\" Temperature (in Celcius) = \" +\n str(current_temperature) + \n \"\\n atmospheric pressure (in hPa unit) = \" +\n str(current_pressure) +\n \"\\n humidity (in percentage) = \" +\n str(current_humidiy) +\n \"\\n description = \" +\n s) \n", "repo_name": "prajaysingh/WeatherAPI", "sub_path": "Weather_API.py", "file_name": "Weather_API.py", "file_ext": "py", "file_size_in_byte": 1178, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "requests.get", "line_number": 15, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "25786526219", "text": "#!/usr/bin/python3\n\nfrom models import chat\nimport os\nimport json\nfrom supabase import create_client\nimport uuid\nfrom datetime import datetime\nfrom models import auth\n\nurl: str = os.environ.get(\"SUPABASE_URL\")\nkey: str = os.environ.get(\"SUPABASE_KEY\")\nsupabase = create_client(url, key)\nlink_id = uuid.uuid4()\n\n\ndef new_link(sender_id, reciever_id):\n \"\"\"add a new link\"\"\"\n lista = []\n data1 = supabase.table('link').select(\n '*').match({'receiver_id': reciever_id, 'sender_id': sender_id}).execute()\n data2 = supabase.table('link').select(\n '*').match({'receiver_id': sender_id, 'sender_id': reciever_id}).execute()\n if data1.data == lista and data2.data == lista:\n structure = {\n \"link_id\": str(link_id),\n \"sender_id\": str(sender_id),\n \"receiver_id\": str(reciever_id),\n \"status\": 1,\n \"linked_at\": str(datetime.now())\n }\n data = supabase.table('link').insert(structure).execute()\n return data\n else:\n raise ('you are alredy connected')\n\n\ndef accept_friendship(link_id, user_id):\n \"\"\"upodates the status of the link so they accept the freinship\"\"\"\n try:\n supabase.table('link').update({'status': 2}).eq(\n 'link_id', link_id).execute()\n chat.generate_chat(user_id)\n\n except:\n raise Exception()\n\n\ndef block_friendship(link_id):\n \"\"\"a method to update the status of the link so that they block the user\"\"\"\n try:\n supabase.table('link').update({'status': 3}).eq(\n 'link_id', link_id).execute()\n except:\n raise Exception()\n\n\ndef search_link(receiver_id, sender_id):\n \"\"\"a function that search for a link\"\"\"\n lista = []\n lk = supabase.table('link').select('link_id').match(\n {'receiver_id': receiver_id, 'sender_id': sender_id}).execute()\n if lk == lista:\n lk = supabase.table('link').select('link_id').match(\n {'receiver_id': sender_id, 'sender_id': receiver_id}).execute()\n if lk == lista:\n raise Exception('no existe un link')\n else:\n return lk.data\n\n\ndef deletefriend(lin_id):\n \"\"\"a function that delete the frienship between two users\"\"\"\n try:\n supabase.table('link').delete().eq('link_id', lin_id).execute()\n except:\n raise Exception()\n\n\ndef list_friends_links(user_id):\n \"\"\"a method that returns all the id of the users who are friends\"\"\"\n friend1 = supabase.table('link').select('receiver_id').match(\n {'status': 2, 'sender_id': user_id}).execute()\n friend2 = supabase.table('link').select('sender_id').match(\n {'status': 2, 'receiver_id': user_id}).execute()\n lista = []\n for friend_id in friend1.data:\n lista.append(friend_id[\"receiver_id\"])\n for friend_id in friend2.data:\n lista.append(friend_id[\"sender_id\"])\n return 
lista\n\n\ndef generete_random_link(reciever_id, sender_id):\n \"\"\"a method to start a link in 0 relation\"\"\"\n lista = []\n data1 = supabase.table('link').select(\n '*').match({'receiver_id': reciever_id, 'sender_id': sender_id}).execute()\n data2 = supabase.table('link').select(\n '*').match({'receiver_id': sender_id, 'sender_id': reciever_id}).execute()\n if data1.data == lista and data2.data == lista:\n structure = {\n \"link_id\": str(link_id),\n \"sender_id\": str(sender_id),\n \"receiver_id\": str(reciever_id),\n \"status\": 0,\n \"linked_at\": str(datetime.now())\n }\n data = supabase.table('link').insert(structure).execute()\n return data\n else:\n raise ('you are alredy connected')\n\n\ndef list_random(user_id):\n \"\"\"a method that returns all the id of the users who are friends\"\"\"\n friend1 = supabase.table('link').select('receiver_id').match(\n {'status': 0, 'sender_id': user_id}).execute()\n friend2 = supabase.table('link').select('sender_id').match(\n {'status': 0, 'receiver_id': user_id}).execute()\n lista = []\n for friend_id in friend1.data:\n lista.append(friend_id[\"receiver_id\"])\n for friend_id in friend2.data:\n lista.append(friend_id[\"sender_id\"])\n return lista\n", "repo_name": "knappygd/speek", "sub_path": "models/link.py", "file_name": "link.py", "file_ext": "py", "file_size_in_byte": 4175, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "33", "api": [{"api_name": "os.environ.get", "line_number": 11, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 12, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 12, "usage_type": "attribute"}, {"api_name": "supabase.create_client", "line_number": 13, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 14, "usage_type": "call"}, {"api_name": "supabase.table", "line_number": 20, "usage_type": "call"}, {"api_name": "supabase.table", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "name"}, {"api_name": "supabase.table", "line_number": 32, "usage_type": "call"}, {"api_name": "supabase.table", "line_number": 41, "usage_type": "call"}, {"api_name": "models.chat.generate_chat", "line_number": 43, "usage_type": "call"}, {"api_name": "models.chat", "line_number": 43, "usage_type": "name"}, {"api_name": "supabase.table", "line_number": 52, "usage_type": "call"}, {"api_name": "supabase.table", "line_number": 61, "usage_type": "call"}, {"api_name": "supabase.table", "line_number": 64, "usage_type": "call"}, {"api_name": "supabase.table", "line_number": 75, "usage_type": "call"}, {"api_name": "supabase.table", "line_number": 82, "usage_type": "call"}, {"api_name": "supabase.table", "line_number": 84, "usage_type": "call"}, {"api_name": "supabase.table", "line_number": 97, "usage_type": "call"}, {"api_name": "supabase.table", "line_number": 99, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 107, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 107, "usage_type": "name"}, {"api_name": "supabase.table", "line_number": 109, "usage_type": "call"}, {"api_name": "supabase.table", "line_number": 117, "usage_type": "call"}, {"api_name": "supabase.table", "line_number": 119, "usage_type": "call"}]} +{"seq_id": "22990774244", "text": "'''\nODE.py : Defines ODE-related 
things I designed I feel ought to be in scipy.\n\n'''\n\nimport functools\nimport math\nimport mpmath\nimport numpy as np\n\nfrom choreo.scipy_plus.cython.SegmQuad import QuadFormula\nfrom choreo.scipy_plus.cython.ODE import ExplicitSymplecticRKTable\nfrom choreo.scipy_plus.cython.ODE import ImplicitRKTable\n\n# 3 terms definition of polynomial families\n# P_n+1 = (X - a_n) P_n - b_n P_n-1\ndef GaussLegendre3Term(n):\n\n a = mpmath.matrix(n,1)\n b = mpmath.matrix(n,1)\n\n b[0] = 2\n\n for i in range(1,n):\n\n i2 = i*i\n b[i] = mpmath.fraction(i2,4*i2-1)\n\n return a, b\n\ndef ShiftedGaussLegendre3Term(n):\n \n a = mpmath.matrix(n,1)\n b = mpmath.matrix(n,1)\n\n for i in range(n):\n a[i] = mpmath.fraction(1,2)\n\n b[0] = 1\n\n for i in range(1,n):\n\n i2 = i*i\n b[i] = mpmath.fraction(i2,4*(4*i2-1))\n\n return a, b\n\ndef EvalAllFrom3Term(a,b,n,x):\n # n >= 1\n\n phi = mpmath.matrix(n+1,1)\n\n phi[0] = mpmath.mpf(1)\n phi[1] = x - a[0]\n\n for i in range(1,n):\n\n phi[i+1] = (x - a[i]) * phi[i] - b[i] * phi[i-1]\n\n return phi\n\ndef GaussMatFrom3Term(a,b,n):\n \n J = mpmath.matrix(n)\n \n for i in range(n):\n J[i,i] = a[i]\n\n for i in range(n-1):\n\n J[i ,i+1] = mpmath.sqrt(b[i+1])\n J[i+1,i ] = J[i ,i+1]\n\n return J\n\ndef LobattoMatFrom3Term(a,b,n):\n \n J = mpmath.matrix(n)\n \n for i in range(n-1):\n J[i,i] = a[i]\n\n for i in range(n-2):\n\n J[i ,i+1] = mpmath.sqrt(b[i+1])\n J[i+1,i ] = J[i ,i+1]\n\n m = n-1\n\n xm = mpmath.mpf(0)\n phim = EvalAllFrom3Term(a,b,m,xm)\n xp = mpmath.mpf(1)\n phip = EvalAllFrom3Term(a,b,m,xp)\n\n mat = mpmath.matrix(2)\n \n mat[0,0] = phim[m]\n mat[1,0] = phip[m]\n mat[0,1] = phim[m-1]\n mat[1,1] = phip[m-1]\n \n rhs = mpmath.matrix(2,1)\n rhs[0] = xm * phim[m]\n rhs[1] = xp * phip[m]\n\n (alpha, beta) = (mat ** -1) * rhs\n\n J[n-1, n-1] = alpha\n J[n-2, n-1] = mpmath.sqrt(beta)\n J[n-1, n-2] = J[n-2, n-1]\n\n return J\n\ndef RadauMatFrom3Term(a,b,n,x):\n \n J = mpmath.matrix(n)\n \n for i in range(n-1):\n J[i,i] = a[i]\n\n for i in range(n-1):\n\n J[i ,i+1] = mpmath.sqrt(b[i+1])\n J[i+1,i ] = J[i ,i+1]\n\n m = n-1\n phim = EvalAllFrom3Term(a,b,m,x)\n alpha = x - b[m] * phim[m-1] / phim[m]\n\n J[n-1, n-1] = alpha\n\n return J\n\ndef QuadFrom3Term(a,b,n, method = \"Gauss\"):\n\n if method == \"Gauss\":\n J = GaussMatFrom3Term(a,b,n)\n elif method == \"Radau_I\":\n x = mpmath.mpf(0)\n J = RadauMatFrom3Term(a,b,n,x)\n elif method == \"Radau_II\":\n x = mpmath.mpf(1)\n J = RadauMatFrom3Term(a,b,n,x)\n elif method == \"Lobatto_III\":\n J = LobattoMatFrom3Term(a,b,n)\n else:\n raise ValueError(f\"Unknown method {method}\")\n \n z, P = mpmath.mp.eigsy(J)\n\n w = mpmath.matrix(n,1)\n for i in range(n):\n w[i] = b[0] * P[0,i] * P[0,i]\n\n return w, z\n\ndef BuildButcherCMat(z,n,m):\n \n mat = mpmath.matrix(n,m)\n\n for j in range(n):\n for i in range(m):\n mat[j,i] = z[j]**i\n \n return mat\n\ndef BuildButcherCRHS(y,z,n,m):\n \n rhs = mpmath.matrix(n,m)\n\n for j in range(n):\n for i in range(m):\n rhs[j,i] = (z[j]**(i+1) - y[j]**(i+1))/(i+1)\n \n return rhs\n\ndef ComputeButcher_collocation(z,n):\n\n mat = BuildButcherCMat(z,n,n)\n mat_inv = mat ** (-1)\n \n y = mpmath.matrix(n,1)\n for i in range(n):\n y[i] = 0\n \n rhs = BuildButcherCRHS(y,z,n,n)\n Butcher_a = rhs * mat_inv\n \n zp = mpmath.matrix(n,1)\n for i in range(n):\n y[i] = 1\n zp[i] = 1+z[i]\n \n rhs = BuildButcherCRHS(y,zp,n,n)\n Butcher_beta = rhs * mat_inv \n \n for i in range(n):\n y[i] = -1 + z[i]\n zp[i] = 0\n \n rhs = BuildButcherCRHS(y,zp,n,n)\n Butcher_gamma = rhs * mat_inv\n \n return 
Butcher_a, Butcher_beta, Butcher_gamma\n\ndef ComputeButcher_sub_collocation(z,n):\n\n mat = BuildButcherCMat(z,n-1,n-1)\n mat_inv = mat ** (-1)\n \n y = mpmath.matrix(n,1)\n for i in range(n):\n y[i] = 0\n \n rhs_plus = BuildButcherCRHS(y,z,n,n)\n rhs = rhs_plus[1:n,0:(n-1)]\n Butcher_a_sub = rhs * mat_inv\n \n Butcher_a = mpmath.matrix(n)\n for i in range(n-1):\n for j in range(n-1):\n Butcher_a[i+1,j] = Butcher_a_sub[i,j]\n \n return Butcher_a\n\n\ndef SymmetricAdjointQuadrature(w,z,n):\n\n w_ad = mpmath.matrix(n,1)\n z_ad = mpmath.matrix(n,1)\n\n for i in range(n):\n\n z_ad[i] = 1 - z[n-1-i]\n w_ad[i] = w[n-1-i]\n\n return w_ad, z_ad\n\ndef SymmetricAdjointButcher(Butcher_a, Butcher_b, Butcher_c, Butcher_beta, Butcher_gamma, n):\n\n Butcher_b_ad, Butcher_c_ad = SymmetricAdjointQuadrature(Butcher_b,Butcher_c,n)\n\n Butcher_a_ad = mpmath.matrix(n)\n Butcher_beta_ad = mpmath.matrix(n)\n Butcher_gamma_ad = mpmath.matrix(n)\n\n for i in range(n):\n for j in range(n):\n \n Butcher_a_ad[i,j] = Butcher_b[n-1-j] - Butcher_a[n-1-i,n-1-j]\n\n Butcher_beta_ad[i,j] = Butcher_gamma[n-1-i,n-1-j]\n Butcher_gamma_ad[i,j] = Butcher_beta[n-1-i,n-1-j]\n\n return Butcher_a_ad, Butcher_b_ad, Butcher_c_ad, Butcher_beta_ad, Butcher_gamma_ad\n\ndef SymplecticAdjointButcher(Butcher_a, Butcher_b, n):\n\n Butcher_a_ad = mpmath.matrix(n)\n\n for i in range(n):\n for j in range(n):\n \n Butcher_a_ad[i,j] = Butcher_b[j] * (1 - Butcher_a[j,i] / Butcher_b[i])\n \n return Butcher_a_ad\n\n@functools.cache\ndef ComputeGaussButcherTables(n, dps=60, method=\"Gauss\"):\n\n mpmath.mp.dps = dps\n\n a, b = ShiftedGaussLegendre3Term(n)\n \n for quad_method in [\"Gauss\", \"Radau_II\", \"Radau_I\", \"Lobatto_III\"]:\n if quad_method in method:\n w, z = QuadFrom3Term(a,b,n, method=quad_method)\n break\n else:\n return ValueError(f\"Unknown associated quadrature method {method}\")\n \n Butcher_a, Butcher_beta , Butcher_gamma = ComputeButcher_collocation(z, n)\n \n if method in [\"Lobatto_IIIC\", \"Lobatto_IIIC*\", \"Lobatto_IIID\"]:\n Butcher_a = ComputeButcher_sub_collocation(z,n)\n \n known_method = False\n if method in [\"Gauss\", \"Lobatto_IIIA\", \"Radau_IIA\", \"Lobatto_IIIC*\"]:\n # No transformation is required\n known_method = True\n \n if method in [\"Lobatto_IIIB\", \"Radau_IA\", \"Lobatto_IIIC\"]:\n known_method = True\n # Symplectic adjoint\n Butcher_a = SymplecticAdjointButcher(Butcher_a, w, n) \n \n if method in [\"Radau_IB\", \"Radau_IIB\", \"Lobatto_IIIS\", \"Lobatto_IIID\" ]:\n known_method = True\n # Symplectic adjoint average\n Butcher_a_ad = SymplecticAdjointButcher(Butcher_a, w, n) \n Butcher_a = (Butcher_a_ad + Butcher_a) / 2\n \n if not(known_method):\n raise ValueError(f'Unknown method {method}')\n \n return Butcher_a, w, z, Butcher_beta, Butcher_gamma\n\ndef GetConvergenceRate(method, n):\n \n if \"Gauss\" in method:\n if n < 1:\n raise ValueError(f\"Incorrect value for n {n}\")\n th_cvg_rate = 2*n\n elif \"Radau\" in method:\n if n < 2:\n raise ValueError(f\"Incorrect value for n {n}\")\n th_cvg_rate = 2*n-1\n elif \"Lobatto\" in method:\n if n < 2:\n raise ValueError(f\"Incorrect value for n {n}\")\n th_cvg_rate = 2*n-2\n else:\n raise ValueError(f\"Unknown method {method}\")\n \n return th_cvg_rate\n\n@functools.cache\ndef ComputeQuadrature(n, dps=30, method=\"Gauss\"):\n\n th_cvg_rate = GetConvergenceRate(method, n)\n \n mpmath.mp.dps = dps\n a, b = ShiftedGaussLegendre3Term(n)\n w, z = QuadFrom3Term(a,b,n,method=method)\n\n w_np = np.array(w.tolist(),dtype=np.float64).reshape(n)\n z_np = 
np.array(z.tolist(),dtype=np.float64).reshape(n)\n \n return QuadFormula(\n w = w_np ,\n x = z_np ,\n th_cvg_rate = th_cvg_rate ,\n )\n\ndef ComputeImplicitRKTable_Gauss(n, dps=60, method=\"Gauss\"):\n\n th_cvg_rate = GetConvergenceRate(method, n)\n \n Butcher_a, Butcher_b, Butcher_c, Butcher_beta, Butcher_gamma = ComputeGaussButcherTables(n, dps=dps, method=method)\n\n Butcher_a_np = np.array(Butcher_a.tolist(),dtype=np.float64)\n Butcher_b_np = np.array(Butcher_b.tolist(),dtype=np.float64).reshape(n)\n Butcher_c_np = np.array(Butcher_c.tolist(),dtype=np.float64).reshape(n)\n Butcher_beta_np = np.array(Butcher_beta.tolist(),dtype=np.float64)\n Butcher_gamma_np = np.array(Butcher_gamma.tolist(),dtype=np.float64)\n \n return ImplicitRKTable(\n a_table = Butcher_a_np ,\n b_table = Butcher_b_np ,\n c_table = Butcher_c_np ,\n beta_table = Butcher_beta_np ,\n gamma_table = Butcher_gamma_np ,\n th_cvg_rate = th_cvg_rate ,\n )\n \ndef ComputeImplicitSymplecticRKTablePair_Gauss(n, dps=60, method=\"Gauss\"):\n \n th_cvg_rate = GetConvergenceRate(method, n)\n \n Butcher_a, Butcher_b, Butcher_c, Butcher_beta, Butcher_gamma = ComputeGaussButcherTables(n, dps=dps, method=method)\n Butcher_a_ad = SymplecticAdjointButcher(Butcher_a, Butcher_b, n) \n \n Butcher_a_np = np.array(Butcher_a.tolist(),dtype=np.float64)\n Butcher_b_np = np.array(Butcher_b.tolist(),dtype=np.float64).reshape(n)\n Butcher_c_np = np.array(Butcher_c.tolist(),dtype=np.float64).reshape(n)\n Butcher_beta_np = np.array(Butcher_beta.tolist(),dtype=np.float64)\n Butcher_gamma_np = np.array(Butcher_gamma.tolist(),dtype=np.float64)\n Butcher_a_ad_np = np.array(Butcher_a_ad.tolist(),dtype=np.float64)\n \n rk = ImplicitRKTable(\n a_table = Butcher_a_np ,\n b_table = Butcher_b_np ,\n c_table = Butcher_c_np ,\n beta_table = Butcher_beta_np ,\n gamma_table = Butcher_gamma_np ,\n th_cvg_rate = th_cvg_rate ,\n )\n \n rk_ad = ImplicitRKTable(\n a_table = Butcher_a_ad_np ,\n b_table = Butcher_b_np ,\n c_table = Butcher_c_np ,\n beta_table = Butcher_beta_np ,\n gamma_table = Butcher_gamma_np ,\n th_cvg_rate = th_cvg_rate ,\n )\n \n return rk, rk_ad\n \ndef Yoshida_w_to_cd(w_in, th_cvg_rate):\n '''\n input : vector w as in Construction of higher order symplectic integrators in PHYSICS LETTERS A by Haruo Yoshida 1990.\n \n w[1:m+1] (m elements) is provided. 
w0 is implicit.\n\n '''\n \n m = w_in.shape[0]\n \n wo = 1-2*math.fsum(w_in)\n w = np.zeros((m+1),dtype=np.float64)\n w[0] = wo\n for i in range(m):\n w[i+1] = w_in[i]\n \n n = 2*m + 2\n\n c_table = np.zeros((n),dtype=np.float64) \n d_table = np.zeros((n),dtype=np.float64) \n \n for i in range(m): \n val = w[m-i]\n d_table[i] = val\n d_table[2*m-i] = val\n d_table[m] = w[0]\n \n c_table[0] = w[m] / 2\n c_table[2*m+1] = w[m] / 2\n for i in range(m): \n val = (w[m-i]+w[m-1-i]) / 2\n c_table[i+1] = val\n c_table[2*m-i] = val\n \n return ExplicitSymplecticRKTable(\n c_table ,\n d_table ,\n th_cvg_rate ,\n )\n \ndef Yoshida_w_to_cd_reduced(w, th_cvg_rate):\n '''\n Variation on Yosida's method\n \n input : vector w as in Construction of higher order symplectic integrators in PHYSICS LETTERS A by Haruo Yoshida 1990.\n \n w[1:m+1] (m elements) is provided.\n\n '''\n \n m = w.shape[0]\n n = 2*m\n\n c_table = np.zeros((n),dtype=np.float64) \n d_table = np.zeros((n),dtype=np.float64) \n \n for i in range(m): \n val = w[m-1-i]\n d_table[i] = val\n d_table[n-2-i] = val\n\n \n c_table[0] = w[m-1] / 2\n c_table[n-1] = w[m-1] / 2\n for i in range(m-1): \n val = (w[m-1-i]+w[m-2-i]) / 2\n c_table[i+1] = val\n c_table[n-2-i] = val\n \n return ExplicitSymplecticRKTable(\n c_table ,\n d_table ,\n th_cvg_rate ,\n )\n \n ", "repo_name": "gabrielfougeron/choreo", "sub_path": "choreo/scipy_plus/multiprec_tables.py", "file_name": "multiprec_tables.py", "file_ext": "py", "file_size_in_byte": 12119, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "33", "api": [{"api_name": "mpmath.matrix", "line_number": 19, "usage_type": "call"}, {"api_name": "mpmath.matrix", "line_number": 20, "usage_type": "call"}, {"api_name": "mpmath.fraction", "line_number": 27, "usage_type": "call"}, {"api_name": "mpmath.matrix", "line_number": 33, "usage_type": "call"}, {"api_name": "mpmath.matrix", "line_number": 34, "usage_type": "call"}, {"api_name": "mpmath.fraction", "line_number": 37, "usage_type": "call"}, {"api_name": "mpmath.fraction", "line_number": 44, "usage_type": "call"}, {"api_name": "mpmath.matrix", "line_number": 51, "usage_type": "call"}, {"api_name": "mpmath.mpf", "line_number": 53, "usage_type": "call"}, {"api_name": "mpmath.matrix", "line_number": 64, "usage_type": "call"}, {"api_name": "mpmath.sqrt", "line_number": 71, "usage_type": "call"}, {"api_name": "mpmath.matrix", "line_number": 78, "usage_type": "call"}, {"api_name": "mpmath.sqrt", "line_number": 85, "usage_type": "call"}, {"api_name": "mpmath.mpf", "line_number": 90, "usage_type": "call"}, {"api_name": "mpmath.mpf", "line_number": 92, "usage_type": "call"}, {"api_name": "mpmath.matrix", "line_number": 95, "usage_type": "call"}, {"api_name": "mpmath.matrix", "line_number": 102, "usage_type": "call"}, {"api_name": "mpmath.sqrt", "line_number": 109, "usage_type": "call"}, {"api_name": "mpmath.matrix", "line_number": 116, "usage_type": "call"}, {"api_name": "mpmath.sqrt", "line_number": 123, "usage_type": "call"}, {"api_name": "mpmath.mpf", "line_number": 139, "usage_type": "call"}, {"api_name": "mpmath.mpf", "line_number": 142, "usage_type": "call"}, {"api_name": "mpmath.mp.eigsy", "line_number": 149, "usage_type": "call"}, {"api_name": "mpmath.mp", "line_number": 149, "usage_type": "attribute"}, {"api_name": "mpmath.matrix", "line_number": 151, "usage_type": "call"}, {"api_name": "mpmath.matrix", "line_number": 159, "usage_type": "call"}, {"api_name": "mpmath.matrix", "line_number": 169, "usage_type": 
"call"}, {"api_name": "mpmath.matrix", "line_number": 182, "usage_type": "call"}, {"api_name": "mpmath.matrix", "line_number": 189, "usage_type": "call"}, {"api_name": "mpmath.matrix", "line_number": 211, "usage_type": "call"}, {"api_name": "mpmath.matrix", "line_number": 219, "usage_type": "call"}, {"api_name": "mpmath.matrix", "line_number": 229, "usage_type": "call"}, {"api_name": "mpmath.matrix", "line_number": 230, "usage_type": "call"}, {"api_name": "mpmath.matrix", "line_number": 243, "usage_type": "call"}, {"api_name": "mpmath.matrix", "line_number": 244, "usage_type": "call"}, {"api_name": "mpmath.matrix", "line_number": 245, "usage_type": "call"}, {"api_name": "mpmath.matrix", "line_number": 259, "usage_type": "call"}, {"api_name": "mpmath.mp", "line_number": 271, "usage_type": "attribute"}, {"api_name": "functools.cache", "line_number": 268, "usage_type": "attribute"}, {"api_name": "mpmath.mp", "line_number": 332, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 336, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 336, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 337, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 337, "usage_type": "attribute"}, {"api_name": "choreo.scipy_plus.cython.SegmQuad.QuadFormula", "line_number": 339, "usage_type": "call"}, {"api_name": "functools.cache", "line_number": 327, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 351, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 352, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 352, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 353, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 353, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 354, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 354, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 355, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 355, "usage_type": "attribute"}, {"api_name": "choreo.scipy_plus.cython.ODE.ImplicitRKTable", "line_number": 357, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 373, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 373, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 374, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 374, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 375, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 375, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 376, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 376, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 377, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 377, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 378, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 378, "usage_type": "attribute"}, {"api_name": "choreo.scipy_plus.cython.ODE.ImplicitRKTable", "line_number": 380, "usage_type": "call"}, {"api_name": "choreo.scipy_plus.cython.ODE.ImplicitRKTable", "line_number": 389, "usage_type": "call"}, {"api_name": "math.fsum", "line_number": 410, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 411, "usage_type": "call"}, {"api_name": 
"numpy.float64", "line_number": 411, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 418, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 418, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 419, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 419, "usage_type": "attribute"}, {"api_name": "choreo.scipy_plus.cython.ODE.ExplicitSymplecticRKTable", "line_number": 434, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 453, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 453, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 454, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 454, "usage_type": "attribute"}, {"api_name": "choreo.scipy_plus.cython.ODE.ExplicitSymplecticRKTable", "line_number": 469, "usage_type": "call"}]} +{"seq_id": "29241255732", "text": "from sys import stdin\nfrom collections import deque\n\nN, M = map(int, stdin.readline().split())\n\ndx = [1, 0, -1, 0]\ndy = [0, 1, 0, -1]\n\nm = [''] * N\nfor i in range(N): \n\tv = stdin.readline()[:-1]\n\tm[i] = v\n\ndef bfs():\n\tvisited = [[[0] * 2 for _ in range(M)] for _ in range(N)]\n\tvisited[0][0][0] = 1\n\tq = deque()\n\tq.append([0,0,0])\n\twhile(len(q) > 0):\n\t\t[x, y, z] = q.popleft()\n\t\tif(x == M-1 and y == N-1):return visited[y][x][z]\n\t\tfor i in range(4):\n\t\t\tnx = x + dx[i]\n\t\t\tny = y + dy[i]\n\t\t\tif(nx < 0 or nx >= M or ny < 0 or ny >= N): continue\n\t\t\tif(m[ny][nx] == '1' and z == 0):\n\t\t\t\tvisited[ny][nx][1] = visited[y][x][0] + 1\n\t\t\t\tq.append((nx, ny, 1))\n\t\t\telif(m[ny][nx] == '0' and visited[ny][nx][z] == 0):\n\t\t\t\tvisited[ny][nx][z] = visited[y][x][z] + 1\n\t\t\t\tq.append((nx, ny, z))\n\treturn -1\nprint(bfs())\n", "repo_name": "WAQESD/Algorithm", "sub_path": "220627/2206.py", "file_name": "2206.py", "file_ext": "py", "file_size_in_byte": 800, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "sys.stdin.readline", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 4, "usage_type": "name"}, {"api_name": "sys.stdin.readline", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 11, "usage_type": "name"}, {"api_name": "collections.deque", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "17262690933", "text": "from datetime import datetime\nfrom typing import Optional, List\nfrom enum import Enum\n\nfrom beanie import Document, Link\nfrom pydantic import BaseModel, Field\n\nfrom app.server.models.customers import Customer\nfrom app.server.models.users import User\n\nclass ProjectStatus(str, Enum):\n alive = \"Alive\"\n stop = \"Stop\"\n end = \"End\"\n delayed = \"Delayed\"\n\nclass UaRole(str, Enum):\n pm = \"PM\"\n business = \"사업\"\n plan = \"기획\"\n design = \"디자인\"\n appfe = \"앱개발\"\n webfe = \"웹프론트개발\"\n backend = \"백엔드개발\"\n qa = \"QA\"\n\nclass UserAttendance(Document):\n uaUser: Link[User]\n uaRole: UaRole\n uaStartDate: Optional[datetime] = None\n uaEndDate: Optional[datetime] = None\n recordDate: datetime = datetime.now()\n\n\nclass Project(Document):\n projectCode: str\n projectName: str\n projectCustomer: Link[Customer]\n projectPM: Link[User]\n projectUsers: List[Link[User]]\n projectStartDate: Optional[datetime] = None\n projectEndDate: Optional[datetime] = None\n actualEndDate: Optional[datetime] = None\n projectStatus: ProjectStatus #= Field(None, alias=\"Status\")\n 
projectRemark: str\n projectAttendances: Optional[List[UserAttendance]]\n recordDate: datetime = datetime.now()\n\n class Settings:\n name = \"Project\"\n\n class Config:\n schema_extra = {\n \"example\": {\n \"projectCode\": \"PJ-OR999-A01\",\n \"projectName\": \"2023 전사교육\",\n \"projectCustomer\" : \"63fc2e50ed67382c9a72fd06\",\n \"projectPM\": \"63fc658baa02417be4600e28\",\n \"projectUsers\": [\n \"63fc658baa02417be4600e28\",\n \"63fc658baa02417be4600e28\",\n ],\n \"projectStartDate\": datetime.now(),\n \"projectStatus\": ProjectStatus.alive,\n \"projectRemark\": \"전사공통\",\n \"projectAttendances\": [ { \n \"uaUser\": \"63fc658baa02417be4600e28\",\n \"uaRole\": UaRole.pm,\n \"uaStartDate\": datetime.now(),\n \"uaEndDate\": datetime.now(),\n \"recordDate\": datetime.now()\n } ],\n \"recordDate\": datetime.now()\n }\n }\nclass UpdateProject(BaseModel):\n projectCode: Optional[str]\n projectName: Optional[str]\n projectCustomer: Optional[Customer]\n projectPM: Optional[User]\n projectStartDate: Optional[datetime]\n projectEndDate: Optional[datetime]\n actualEndDate: Optional[datetime]\n projectStatus: Optional[str]\n projectRemark: Optional[str]\n projectAttendances: Optional[List[UserAttendance]]\n recordDate: Optional[datetime]\n\n class Config:\n schema_extra = {\n \"example\": {\n \"projectCode\": \"PJ-OR999-A01\",\n \"projectName\": \"2023년 전사교육\",\n \"projectCustomer\" : \"63fc2e50ed67382c9a72fd06\",\n \"projectPM\": \"a\",\n \"projectStartDate\": datetime.now(),\n \"projectEndDate\": datetime.now(),\n \"projectStatus\": \"alive\",\n \"projectRemark\": \"전사공통\",\n \"projectAttendances\": [ { \n \"uaUser\": \"63fc658baa02417be4600e28\",\n \"uaRole\": UaRole.pm,\n \"uaStartDate\": datetime.now(),\n \"uaEndDate\": datetime.now(),\n \"recordDate\": datetime.now()\n } ], \n \"recordDate\": datetime.now()\n }\n }", "repo_name": "bookendus/M-MIS", "sub_path": "backend/app/server/models/projects.py", "file_name": "projects.py", "file_ext": "py", "file_size_in_byte": 3579, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "enum.Enum", "line_number": 11, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 17, "usage_type": "name"}, {"api_name": "beanie.Document", "line_number": 27, "usage_type": "name"}, {"api_name": "beanie.Link", "line_number": 28, "usage_type": "name"}, {"api_name": "app.server.models.users.User", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 30, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 31, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 31, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 32, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 32, "usage_type": "call"}, {"api_name": "beanie.Document", "line_number": 35, "usage_type": "name"}, {"api_name": "beanie.Link", "line_number": 38, "usage_type": "name"}, {"api_name": "app.server.models.customers.Customer", "line_number": 38, "usage_type": "name"}, {"api_name": "beanie.Link", "line_number": 39, "usage_type": "name"}, {"api_name": "app.server.models.users.User", "line_number": 39, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 40, "usage_type": "name"}, {"api_name": "beanie.Link", "line_number": 40, "usage_type": "name"}, {"api_name": "app.server.models.users.User", "line_number": 40, 
"usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 41, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 42, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 42, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 43, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 43, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 46, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 46, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 47, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 47, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 63, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 63, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 69, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 69, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 70, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 70, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 71, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 71, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 73, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 73, "usage_type": "name"}, {"api_name": "pydantic.BaseModel", "line_number": 76, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 77, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 78, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 79, "usage_type": "name"}, {"api_name": "app.server.models.customers.Customer", "line_number": 79, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 80, "usage_type": "name"}, {"api_name": "app.server.models.users.User", "line_number": 80, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 81, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 81, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 82, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 82, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 83, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 83, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 84, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 85, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 86, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 86, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 87, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 87, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 96, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 96, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 97, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 97, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 103, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 103, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 104, "usage_type": "call"}, 
{"api_name": "datetime.datetime", "line_number": 104, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 105, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 105, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 107, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 107, "usage_type": "name"}]} +{"seq_id": "31400777147", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n# @Time : 2020/7/16 下午2:06\n# @Author : HOY\n# @Email : huangouyan@changingedu.com\n# @File : xlnet.py\n# @Software: PyCharm\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom transformers import XLNetTokenizer, XLNetForSequenceClassification, XLNetModel\n\n\nclass Config():\n\n \"\"\"配置参数\"\"\"\n def __init__(self, dataset):\n self.model_name = 'xlnet'\n self.train_path = 'train.csv' # 训练集\n self.class_list = [x.strip() for x in open(\n dataset + '/class_multi1.txt').readlines()] # 类别名单\n self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt' # 模型训练结果\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # 设备\n\n self.require_improvement = 1000 # 若超过1000batch效果还没提升,则提前结束训练\n self.num_classes = len(self.class_list) # 类别数\n self.num_epochs = 3 # epoch数\n self.batch_size = 64 # 128mini-batch大小\n self.pad_size = 32 # 每句话处理成的长度(短填长切)\n self.learning_rate = 5e-5 # 学习率\n self.xlnet_path = './xlnet_pretrain'\n self.tokenizer = XLNetTokenizer.from_pretrained(self.xlnet_path)\n self.hidden_size = 768\n\n\nclass Model(nn.Module):\n\n def __init__(self, config):\n super(Model, self).__init__()\n self.xlnet = XLNetModel.from_pretrained(config.xlnet_path, num_labels=config.num_classes)\n for param in list(self.xlnet.parameters())[:-5]:\n param.requires_grad = False\n self.fc = nn.Linear(config.hidden_size, 192)\n self.fc1 = nn.Linear(192, config.num_classes)\n\n def forward(self, x):\n context = x[0] # 输入的句子\n mask = x[2] # 对padding部分进行mask,和句子一个size,padding部分用0表示,如:[1, 1, 1, 1, 0, 0]\n logits = self.xlnet(input_ids=context, attention_mask=mask)\n logits = logits[0]\n out = logits[:, -1]\n out = self.fc(out)\n out = F.relu(out)\n out = self.fc1(out)\n return out\n", "repo_name": "HoyTta0/KnowledgeDistillation", "sub_path": "models/xlnet.py", "file_name": "xlnet.py", "file_ext": "py", "file_size_in_byte": 2409, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 193, "dataset": "github-code", "pt": "41", "api": [{"api_name": "torch.device", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 24, "usage_type": "attribute"}, {"api_name": "transformers.XLNetTokenizer.from_pretrained", "line_number": 33, "usage_type": "call"}, {"api_name": "transformers.XLNetTokenizer", "line_number": 33, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 37, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 37, "usage_type": "name"}, {"api_name": "transformers.XLNetModel.from_pretrained", "line_number": 41, "usage_type": "call"}, {"api_name": "transformers.XLNetModel", "line_number": 41, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 44, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 45, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", 
"line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "42570991216", "text": "# coding: utf-8\n\n\"\"\"\n Fulfillment API\n\n Use the Fulfillment API to complete the process of packaging, addressing, handling, and shipping each order on behalf of the seller, in accordance with the payment method and timing specified at checkout. # noqa: E501\n\n OpenAPI spec version: v1.19.19\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nclass PaymentSummary(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'payments': 'list[Payment]',\n 'refunds': 'list[OrderRefund]',\n 'total_due_seller': 'Amount'\n }\n\n attribute_map = {\n 'payments': 'payments',\n 'refunds': 'refunds',\n 'total_due_seller': 'totalDueSeller'\n }\n\n def __init__(self, payments=None, refunds=None, total_due_seller=None): # noqa: E501\n \"\"\"PaymentSummary - a model defined in Swagger\"\"\" # noqa: E501\n self._payments = None\n self._refunds = None\n self._total_due_seller = None\n self.discriminator = None\n if payments is not None:\n self.payments = payments\n if refunds is not None:\n self.refunds = refunds\n if total_due_seller is not None:\n self.total_due_seller = total_due_seller\n\n @property\n def payments(self):\n \"\"\"Gets the payments of this PaymentSummary. # noqa: E501\n\n This array consists of payment information for the order, including payment status, payment method, payment amount, and payment date. This array is always returned, although some of the fields under this container will not be returned until payment has been made. # noqa: E501\n\n :return: The payments of this PaymentSummary. # noqa: E501\n :rtype: list[Payment]\n \"\"\"\n return self._payments\n\n @payments.setter\n def payments(self, payments):\n \"\"\"Sets the payments of this PaymentSummary.\n\n This array consists of payment information for the order, including payment status, payment method, payment amount, and payment date. This array is always returned, although some of the fields under this container will not be returned until payment has been made. # noqa: E501\n\n :param payments: The payments of this PaymentSummary. # noqa: E501\n :type: list[Payment]\n \"\"\"\n\n self._payments = payments\n\n @property\n def refunds(self):\n \"\"\"Gets the refunds of this PaymentSummary. # noqa: E501\n\n This array is always returned, but is returned as an empty array unless the seller has submitted a partial or full refund to the buyer for the order. If a refund has occurred, the refund amount and refund date will be shown for each refund. # noqa: E501\n\n :return: The refunds of this PaymentSummary. # noqa: E501\n :rtype: list[OrderRefund]\n \"\"\"\n return self._refunds\n\n @refunds.setter\n def refunds(self, refunds):\n \"\"\"Sets the refunds of this PaymentSummary.\n\n This array is always returned, but is returned as an empty array unless the seller has submitted a partial or full refund to the buyer for the order. If a refund has occurred, the refund amount and refund date will be shown for each refund. # noqa: E501\n\n :param refunds: The refunds of this PaymentSummary. 
# noqa: E501\n :type: list[OrderRefund]\n \"\"\"\n\n self._refunds = refunds\n\n @property\n def total_due_seller(self):\n \"\"\"Gets the total_due_seller of this PaymentSummary. # noqa: E501\n\n\n :return: The total_due_seller of this PaymentSummary. # noqa: E501\n :rtype: Amount\n \"\"\"\n return self._total_due_seller\n\n @total_due_seller.setter\n def total_due_seller(self, total_due_seller):\n \"\"\"Sets the total_due_seller of this PaymentSummary.\n\n\n :param total_due_seller: The total_due_seller of this PaymentSummary. # noqa: E501\n :type: Amount\n \"\"\"\n\n self._total_due_seller = total_due_seller\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(PaymentSummary, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, PaymentSummary):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n", "repo_name": "matecsaj/ebay_rest", "sub_path": "src/ebay_rest/api/sell_fulfillment/models/payment_summary.py", "file_name": "payment_summary.py", "file_ext": "py", "file_size_in_byte": 5869, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 30, "dataset": "github-code", "pt": "33", "api": [{"api_name": "six.iteritems", "line_number": 126, "usage_type": "call"}, {"api_name": "pprint.pformat", "line_number": 151, "usage_type": "call"}]} +{"seq_id": "14339543433", "text": "import math\nfrom math import log\nimport baxcat.utils.cc_general_utils as utils\nfrom scipy.special import gammaln\nimport random\nimport numpy\n\nfrom scipy.misc import logsumexp\n\nimport pylab\n\nclass cc_poisson(object):\n \"\"\"\n Poisson (count) data with gamma prior on lambda.\n Does not require additional argumets (distargs=None).\n \"\"\"\n\n cctype = 'poisson'\n\n def __init__(self, N=0, sum_x=0, sum_log_fact_x=0, a=1, b=1, distargs=None):\n \"\"\"\n Optional arguments:\n -- N: number of data points\n -- sum_x: suffstat, sum(X)\n -- sum_x_log_fact_x: suffstat, sum(log(X!))\n -- a: hyperparameter\n -- b: hyperparameter\n -- distargs: not used\n \"\"\"\n assert a > 0\n assert b > 0\n self.N = N\n self.sum_x = sum_x\n self.sum_log_fact_x = sum_log_fact_x\n self.a = a\n self.b = b\n\n def set_hypers(self, hypers):\n assert hypers['a'] > 0\n assert hypers['b'] > 0\n\n self.b = hypers['b']\n self.a = hypers['a']\n\n def insert_element(self, x):\n self.N += 1.0\n self.sum_x += x\n self.sum_log_fact_x += gammaln(x+1)\n\n def remove_element(self, x):\n self.N -= 1.0\n self.sum_x -= x\n self.sum_log_fact_x -= gammaln(x+1)\n\n def predictive_logp(self, x):\n return self.calc_predictive_logp(x, self.N, self.sum_x, 
self.sum_log_fact_x, \n self.a, self.b)\n\n def singleton_logp(self, x):\n return self.calc_predictive_logp(x, 0, 0, 0,\n self.a, self.b)\n\n def marginal_logp(self):\n return self.calc_marginal_logp(self.N, self.sum_x, self.sum_log_fact_x,\n self.a, self.b)\n\n def predictive_draw(self):\n an, bn = cc_poisson.posterior_update_parameters(self.N,\n self.sum_x, self.a, self.b)\n draw = numpy.random.negative_binomial(an, bn/(bn+1.0))\n return draw\n # fn = lambda x: numpy.exp(self.predictive_logp(x))\n # lower_bound = 0\n # delta = 1\n # return utils.inversion_sampling(fn, lower_bound, delta)\n\n @staticmethod\n def construct_hyper_grids(X,n_grid=30):\n grids = dict()\n # only use integers for a so we can nicely draw from a negative binomial\n # in predictive_draw\n grids['a'] = numpy.unique(numpy.round(numpy.linspace(1,len(X),n_grid)))\n grids['b'] = utils.log_linspace(.1,float(len(X)), n_grid)\n return grids\n\n @staticmethod\n def init_hypers(grids, X=None): \n hypers = dict()\n hypers['a'] = random.choice(grids['a'])\n hypers['b'] = random.choice(grids['b'])\n\n return hypers\n\n @staticmethod\n def calc_predictive_logp(x, N, sum_x, sum_log_fact_x, a, b):\n\n an, bn = cc_poisson.posterior_update_parameters(\n N, sum_x, a, b)\n\n am, bm = cc_poisson.posterior_update_parameters(\n N+1, sum_x+x, a, b)\n\n ZN = cc_poisson.calc_log_Z(an, bn)\n ZM = cc_poisson.calc_log_Z(am, bm)\n\n return ZM - ZN - gammaln(x+1)\n\n @staticmethod\n def calc_marginal_logp(N, sum_x, sum_log_fact_x, a, b):\n an, bn = cc_poisson.posterior_update_parameters(\n N, sum_x, a, b)\n\n Z0 = cc_poisson.calc_log_Z(a, b)\n ZN = cc_poisson.calc_log_Z(an, bn)\n\n return ZN - Z0 - sum_log_fact_x\n\n @staticmethod\n def update_hypers(clusters, grids):\n # resample alpha\n a = clusters[0].a\n b = clusters[0].b\n\n which_hypers = [0,1]\n random.shuffle(which_hypers)\n\n for hyper in which_hypers:\n if hyper == 0:\n lp_a = cc_poisson.calc_a_conditional_logps(clusters, grids['a'], b)\n a_index = utils.log_pflip(lp_a)\n a = grids['a'][a_index]\n elif hyper == 1:\n lp_b = cc_poisson.calc_b_conditional_logps(clusters, grids['b'], a)\n b_index = utils.log_pflip(lp_b)\n b = grids['b'][b_index]\n else:\n raise ValueError(\"invalid hyper\")\n\n hypers = dict()\n hypers['a'] = a\n hypers['b'] = b\n \n return hypers\n\n @staticmethod\n def posterior_update_parameters(N, sum_x, a, b):\n an = a + sum_x\n bn = b + N\n return an, bn\n\n @staticmethod \n def calc_log_Z(a, b):\n Z = gammaln(a)-a*log(b)\n\n return Z\n \n @staticmethod\n def calc_a_conditional_logps(clusters, a_grid, b):\n lps = []\n for a in a_grid:\n lp = cc_poisson.calc_full_marginal_conditional(clusters, a, b)\n lps.append(lp)\n\n return lps\n\n @staticmethod\n def calc_b_conditional_logps(clusters, b_grid, a):\n lps = []\n for b in b_grid:\n lp = cc_poisson.calc_full_marginal_conditional(clusters, a, b)\n lps.append(lp)\n\n return lps\n\n @staticmethod\n def calc_full_marginal_conditional(clusters, a, b):\n lp = 0\n for cluster in clusters:\n N = cluster.N\n sum_x = cluster.sum_x\n sum_log_fact_x = cluster.sum_log_fact_x\n l = cc_poisson.calc_marginal_logp(N, sum_x, sum_log_fact_x, a, b)\n lp += l\n\n return lp\n\n @staticmethod\n def calc_full_marginal_conditional_h(clusters, hypers):\n lp = 0\n a = clusters[0].a\n b = clusters[0].b\n for cluster in clusters:\n N = cluster.N\n sum_x = cluster.sum_x\n sum_log_fact_x = cluster.sum_log_fact_x\n l = cc_poisson.calc_marginal_logp(N, sum_x, sum_log_fact_x, a, b)\n lp += l\n\n return lp\n\n @staticmethod\n def plot_dist(X, clusters, 
distargs=None):\n colors = [\"red\", \"blue\", \"green\", \"yellow\", \"orange\", \"purple\", \"brown\", \"black\"]\n x_min = min(X)\n x_max = max(X)\n Y = range(int(x_max)+1)\n nn = len(Y)\n K = len(clusters)\n pdf = numpy.zeros((K,nn))\n denom = log(float(len(X)))\n\n a = clusters[0].a\n b = clusters[0].b\n\n nbins = min([len(Y), 50])\n\n toplt = numpy.array(utils.bincount(X,Y))/float(len(X))\n\n pylab.bar(Y, toplt, color=\"gray\", edgecolor=\"none\")\n\n W = [log(clusters[k].N) - denom for k in range(K)]\n\n for k in range(K):\n w = W[k]\n N = clusters[k].N\n sum_x = clusters[k].sum_x\n sum_log_fact_x = clusters[k].sum_log_fact_x\n for n in range(nn):\n y = Y[n]\n pdf[k, n] = numpy.exp(w + cc_poisson.calc_predictive_logp(y, N, sum_x, \n sum_log_fact_x, a, b))\n\n if k >= 8:\n color = \"white\"\n alpha=.3\n else:\n color = colors[k]\n alpha=.7\n pylab.bar(Y, pdf[k,:], color=color, edgecolor='none', alpha=alpha)\n\n pylab.bar(Y, numpy.sum(pdf,axis=0), color='none', edgecolor='black', linewidth=3)\n\n # print integral for debugging (should never be greater than 1)\n # print utils.line_quad(Y, numpy.sum(pdf,axis=0))\n pylab.xlim([0, x_max+1])\n pylab.title('poisson')\n\n", "repo_name": "BaxterEaves/BaxCat", "sub_path": "baxcat/cc_types/cc_poisson_model.py", "file_name": "cc_poisson_model.py", "file_ext": "py", "file_size_in_byte": 7206, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "scipy.special.gammaln", "line_number": 48, "usage_type": "call"}, {"api_name": "scipy.special.gammaln", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.random.negative_binomial", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 70, "usage_type": "attribute"}, {"api_name": "numpy.unique", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 82, "usage_type": "call"}, {"api_name": "baxcat.utils.cc_general_utils.log_linspace", "line_number": 83, "usage_type": "call"}, {"api_name": "baxcat.utils.cc_general_utils", "line_number": 83, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 89, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 90, "usage_type": "call"}, {"api_name": "scipy.special.gammaln", "line_number": 106, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 125, "usage_type": "call"}, {"api_name": "baxcat.utils.cc_general_utils.log_pflip", "line_number": 130, "usage_type": "call"}, {"api_name": "baxcat.utils.cc_general_utils", "line_number": 130, "usage_type": "name"}, {"api_name": "baxcat.utils.cc_general_utils.log_pflip", "line_number": 134, "usage_type": "call"}, {"api_name": "baxcat.utils.cc_general_utils", "line_number": 134, "usage_type": "name"}, {"api_name": "scipy.special.gammaln", "line_number": 153, "usage_type": "call"}, {"api_name": "math.log", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 209, "usage_type": "call"}, {"api_name": "math.log", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 217, "usage_type": "call"}, {"api_name": "baxcat.utils.cc_general_utils.bincount", "line_number": 217, "usage_type": "call"}, {"api_name": "baxcat.utils.cc_general_utils", "line_number": 217, "usage_type": "name"}, {"api_name": "pylab.bar", "line_number": 219, "usage_type": "call"}, {"api_name": "math.log", "line_number": 221, "usage_type": 
"call"}, {"api_name": "numpy.exp", "line_number": 230, "usage_type": "call"}, {"api_name": "pylab.bar", "line_number": 239, "usage_type": "call"}, {"api_name": "pylab.bar", "line_number": 241, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 241, "usage_type": "call"}, {"api_name": "pylab.xlim", "line_number": 245, "usage_type": "call"}, {"api_name": "pylab.title", "line_number": 246, "usage_type": "call"}]} +{"seq_id": "14354865498", "text": "#!/usr/bin/env python\nimport logging\n\nimport click\n\nimport schedule\nimport settings\n\n\n@click.group()\ndef manage():\n \"\"\"Management actions for this flask application.\n\n \"\"\"\n pass\n\n\n@manage.command()\ndef info():\n \"\"\"Prints all configuration variables.\n\n \"\"\"\n for k, v in settings.freeze().items():\n click.echo(f\"{k}={v}\")\n\n\n@manage.command()\ndef startpoller():\n \"\"\"Starts the http poller.\n\n \"\"\"\n logging_levels = {\n \"CRITICAL\": logging.CRITICAL,\n \"ERROR\": logging.ERROR,\n \"WARNING\": logging.WARNING,\n \"INFO\": logging.INFO,\n \"DEBUG\": logging.DEBUG,\n \"NOTSET\": logging.NOTSET,\n }\n logging.basicConfig(\n level=logging_levels.get(settings.LOG_LEVEL.upper(), logging.INFO),\n format=\"[%(levelname)s] [%(name)s] %(message)s\",\n )\n schedule.start()\n\n\nif __name__ == \"__main__\":\n manage()\n", "repo_name": "oliverhernandezmoreno/SourcesOH", "sub_path": "tranque_v1.8.4_source/http-poller-producer/src/manage.py", "file_name": "manage.py", "file_ext": "py", "file_size_in_byte": 882, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "click.group", "line_number": 10, "usage_type": "call"}, {"api_name": "settings.freeze", "line_number": 23, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.CRITICAL", "line_number": 33, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 34, "usage_type": "attribute"}, {"api_name": "logging.WARNING", "line_number": 35, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 36, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 37, "usage_type": "attribute"}, {"api_name": "logging.NOTSET", "line_number": 38, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 40, "usage_type": "call"}, {"api_name": "settings.LOG_LEVEL.upper", "line_number": 41, "usage_type": "call"}, {"api_name": "settings.LOG_LEVEL", "line_number": 41, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 41, "usage_type": "attribute"}, {"api_name": "schedule.start", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "9375724096", "text": "__version__ = \"$Revision$ $Date$\"\n__license__ = \"\"\"\n Copyright (C) 2008-2010 Proformatique \n\n This program is free software; you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation; either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n"""\n\nimport distutils.dir_util\nimport glob\nimport os.path\nimport shutil\nimport tempfile\nfrom xivo_fetchfw import fetchfw\n\n# Destination path for Cisco SMB firmwares.\nsmb_fw_dst_path = os.path.join(fetchfw.TFTP_PATH, 'CiscoSMB', 'firmware')\nsmb_dict_dst_path = os.path.join(fetchfw.TFTP_PATH, 'CiscoSMB', 'i18n')\n\nGZIP_MAGIC_NUMBER_ = '\\x1f\\x8b' # see http://www.gzip.org/zlib/rfc-gzip.html#file-format\n\n\nclass InvalidFileError(Exception):\n pass\n\n\ndef unsign_from_fileobj(f_in, f_out):\n \"\"\"Unsign the content of file-object f_in and write the extracted gzip file\n to file-object f_out.\n \n Note that f_in and f_out should be open in binary mode. This function does\n not call close on the file-object. Also, if you pass it garbage input, you\n might as well receive garbage output.\n \"\"\"\n bytes_in = f_in.read(4096)\n index = bytes_in.find(GZIP_MAGIC_NUMBER_)\n if index == -1:\n raise InvalidFileError(u'This .sgn file doesn\\'t hold a gzip file')\n bytes_in = bytes_in[index:]\n while bytes_in:\n f_out.write(bytes_in)\n bytes_in = f_in.read(4096)\n\n\ndef ciscospa5xx_install_fw(firmware, fw_name):\n zipfile_path = firmware.remote_files[0].path\n fetchfw.makedirs(smb_fw_dst_path)\n fetchfw.zip_extract_files(zipfile_path, (fw_name,), smb_fw_dst_path)\n\n\ndef ciscospa5xx_metainstall_fw(fw_name):\n def aux(firmware):\n ciscospa5xx_install_fw(firmware, fw_name)\n return aux\n\n\ndef ciscospa5xx_install_locale(xfile, locale_src_file, locale_dst_file):\n zipfile_path = xfile.path\n unzip_dir = fetchfw.zip_extract_all('spa5xx_lang', zipfile_path)\n fetchfw.makedirs(smb_dict_dst_path)\n shutil.copy(os.path.join(unzip_dir, locale_src_file),\n os.path.join(smb_dict_dst_path, locale_dst_file))\n\n\ndef ciscospa5xx_metainstall_locale(src_525, dst_525, src_50x, dst_50x):\n def aux(firmware):\n ciscospa5xx_install_locale(firmware.remote_files[0], src_525, dst_525)\n ciscospa5xx_install_locale(firmware.remote_files[1], src_50x, dst_50x)\n return aux\n\n\ndef cisco79xx_install_fw(firmware):\n zipfile_path = firmware.remote_files[0]\n unzip_dir = fetchfw.zip_extract_all(firmware.name, zipfile_path.path)\n distutils.dir_util.copy_tree(unzip_dir, fetchfw.TFTP_PATH)\n\n\ndef cisco79xx_install_locale(xfile, user_locale, create_7905font_file):\n signed_path = xfile.path\n \n # 1. Unsign\n signed_f = open(signed_path, 'rb')\n (unsigned_fd, unsigned_path) = tempfile.mkstemp()\n unsigned_f = os.fdopen(unsigned_fd, 'wb')\n try:\n unsign_from_fileobj(signed_f, unsigned_f)\n finally:\n signed_f.close()\n unsigned_f.close()\n # 2. Extract the first tar\n untar_dir1 = fetchfw.tgz_extract_all('79xx_lang1', unsigned_path)\n # 3. Find the second tar and extract it\n tar_path2 = glob.glob(os.path.join(untar_dir1, '*.tar'))[0]\n untar_dir2 = fetchfw.tar_extract_all('79xx_lang2', tar_path2)\n # 4. Copy the file into tftpboot\n src_base_dir = os.path.join(untar_dir2, 'usr', 'local', 'cm', 'tftp')\n src_dir = user_locale\n dest_dir = user_locale\n locale_path = os.path.join(fetchfw.TFTP_PATH, 'Cisco/i18n', dest_dir)\n distutils.dir_util.copy_tree(os.path.join(src_base_dir, src_dir),\n locale_path)\n # 5. 
Create an empty 7905-font.xml\n if create_7905font_file:\n path_7905font = os.path.join(locale_path, '7905-font.xml')\n if not os.path.isfile(path_7905font):\n f = open(path_7905font, 'w')\n try:\n f.write('\\n')\n finally:\n f.close()\n\n\ndef cisco79xx_metainstall_locale(user_locale, network_locale, create_7905font_file=False):\n def aux(firmware):\n cisco79xx_install_locale(firmware.remote_files[0], user_locale, create_7905font_file)\n cisco79xx_install_locale(firmware.remote_files[1], network_locale, False)\n return aux\n\n\ncisco_install_map = {\n 'cisco7975_sccp_903':\n cisco79xx_install_fw,\n 'cisco7971_sccp_903':\n cisco79xx_install_fw,\n 'cisco7970_sccp_903':\n cisco79xx_install_fw,\n 'cisco7965_sccp_903':\n cisco79xx_install_fw,\n 'cisco7962_sccp_903':\n cisco79xx_install_fw,\n 'cisco7961_sccp_903':\n cisco79xx_install_fw,\n 'cisco7960_sccp_812':\n cisco79xx_install_fw,\n 'cisco7945_sccp_903':\n cisco79xx_install_fw,\n 'cisco7942_sccp_903':\n cisco79xx_install_fw,\n 'cisco7941_sccp_903':\n cisco79xx_install_fw,\n 'cisco7940_sccp_812':\n cisco79xx_install_fw,\n 'cisco7931_sccp_903':\n cisco79xx_install_fw,\n 'cisco7912_sccp_804':\n cisco79xx_install_fw,\n 'cisco7911_sccp_903':\n cisco79xx_install_fw,\n 'cisco7910_sccp_507':\n cisco79xx_install_fw,\n 'cisco7906_sccp_903':\n cisco79xx_install_fw,\n 'cisco7905_sccp_803':\n cisco79xx_install_fw,\n 'cisco7902_sccp_802':\n cisco79xx_install_fw,\n 'cisco7916_sccp_104':\n cisco79xx_install_fw,\n 'cisco7915_sccp_104':\n cisco79xx_install_fw,\n 'cisco7914_sccp_504':\n cisco79xx_install_fw,\n 'cisco79xx_locale_de_DE':\n cisco79xx_metainstall_locale('german_germany', 'germany', True),\n 'cisco79xx_locale_es_ES':\n cisco79xx_metainstall_locale('spanish_spain', 'spain', True),\n 'cisco79xx_locale_fr_CA':\n cisco79xx_metainstall_locale('french_france', 'canada', True),\n 'cisco79xx_locale_fr_FR':\n cisco79xx_metainstall_locale('french_france', 'france', True),\n 'ciscospa525_748':\n ciscospa5xx_metainstall_fw('spa525g-7-4-8.bin'),\n 'ciscospa509_748':\n ciscospa5xx_metainstall_fw('spa50x-30x-7-4-8a.bin'),\n 'ciscospa508_748':\n ciscospa5xx_metainstall_fw('spa50x-30x-7-4-8a.bin'),\n 'ciscospa504_748':\n ciscospa5xx_metainstall_fw('spa50x-30x-7-4-8a.bin'),\n 'ciscospa502_748':\n ciscospa5xx_metainstall_fw('spa50x-30x-7-4-8a.bin'),\n 'ciscospa501_748':\n ciscospa5xx_metainstall_fw('spa50x-30x-7-4-8a.bin'),\n 'ciscospa5xx_locale_de':\n ciscospa5xx_metainstall_locale('spa525_de_v748.xml',\n 'spa525_de.xml',\n 'spa50x_30x_de_v748.xml',\n 'spa50x_30x_de.xml'),\n 'ciscospa5xx_locale_en':\n ciscospa5xx_metainstall_locale('spa525_en_v748.xml',\n 'spa525_en.xml',\n 'spa50x_30x_en_v748.xml',\n 'spa50x_30x_en.xml'),\n 'ciscospa5xx_locale_es':\n ciscospa5xx_metainstall_locale('spa525_es_v748.xml',\n 'spa525_es.xml',\n 'spa50x_30x_es_v748.xml',\n 'spa50x_30x_es.xml'),\n 'ciscospa5xx_locale_fr':\n ciscospa5xx_metainstall_locale('spa525_fr_v748.xml',\n 'spa525_fr.xml',\n 'spa50x_30x_fr_v748.xml',\n 'spa50x_30x_fr.xml'),\n}\n\n\ndef cisco_install_entry_point(firmware):\n if firmware.name in cisco_install_map:\n cisco_install_map[firmware.name](firmware)\n else:\n raise fetchfw.FirmwareInstallationError()\n\n\nfetchfw.register_install_fn('Cisco', None, cisco_install_entry_point)\nfetchfw.register_install_fn('CiscoSMB', None, cisco_install_entry_point)", "repo_name": "Eyepea/xivo-gallifrey", "sub_path": "fetchfw/xivo_fetchfw/brands/cisco.py", "file_name": "cisco.py", "file_ext": "py", "file_size_in_byte": 8249, "program_lang": "python", "lang": "en", "doc_type": 
"code", "stars": 4, "dataset": "github-code", "pt": "33", "api": [{"api_name": "os.path.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 27, "usage_type": "name"}, {"api_name": "xivo_fetchfw.fetchfw.TFTP_PATH", "line_number": 27, "usage_type": "attribute"}, {"api_name": "xivo_fetchfw.fetchfw", "line_number": 27, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 28, "usage_type": "name"}, {"api_name": "xivo_fetchfw.fetchfw.TFTP_PATH", "line_number": 28, "usage_type": "attribute"}, {"api_name": "xivo_fetchfw.fetchfw", "line_number": 28, "usage_type": "name"}, {"api_name": "xivo_fetchfw.fetchfw.makedirs", "line_number": 57, "usage_type": "call"}, {"api_name": "xivo_fetchfw.fetchfw", "line_number": 57, "usage_type": "name"}, {"api_name": "xivo_fetchfw.fetchfw.zip_extract_files", "line_number": 58, "usage_type": "call"}, {"api_name": "xivo_fetchfw.fetchfw", "line_number": 58, "usage_type": "name"}, {"api_name": "xivo_fetchfw.fetchfw.zip_extract_all", "line_number": 69, "usage_type": "call"}, {"api_name": "xivo_fetchfw.fetchfw", "line_number": 69, "usage_type": "name"}, {"api_name": "xivo_fetchfw.fetchfw.makedirs", "line_number": 70, "usage_type": "call"}, {"api_name": "xivo_fetchfw.fetchfw", "line_number": 70, "usage_type": "name"}, {"api_name": "shutil.copy", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 71, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 72, "usage_type": "name"}, {"api_name": "xivo_fetchfw.fetchfw.zip_extract_all", "line_number": 84, "usage_type": "call"}, {"api_name": "xivo_fetchfw.fetchfw", "line_number": 84, "usage_type": "name"}, {"api_name": "distutils.dir_util.dir_util.copy_tree", "line_number": 85, "usage_type": "call"}, {"api_name": "distutils.dir_util.dir_util", "line_number": 85, "usage_type": "attribute"}, {"api_name": "distutils.dir_util", "line_number": 85, "usage_type": "name"}, {"api_name": "xivo_fetchfw.fetchfw.TFTP_PATH", "line_number": 85, "usage_type": "attribute"}, {"api_name": "xivo_fetchfw.fetchfw", "line_number": 85, "usage_type": "name"}, {"api_name": "tempfile.mkstemp", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path.fdopen", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 94, "usage_type": "name"}, {"api_name": "xivo_fetchfw.fetchfw.tgz_extract_all", "line_number": 101, "usage_type": "call"}, {"api_name": "xivo_fetchfw.fetchfw", "line_number": 101, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 103, "usage_type": "name"}, {"api_name": "xivo_fetchfw.fetchfw.tar_extract_all", "line_number": 104, "usage_type": "call"}, {"api_name": "xivo_fetchfw.fetchfw", "line_number": 104, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 
106, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 106, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 109, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 109, "usage_type": "name"}, {"api_name": "xivo_fetchfw.fetchfw.TFTP_PATH", "line_number": 109, "usage_type": "attribute"}, {"api_name": "xivo_fetchfw.fetchfw", "line_number": 109, "usage_type": "name"}, {"api_name": "distutils.dir_util.dir_util.copy_tree", "line_number": 110, "usage_type": "call"}, {"api_name": "distutils.dir_util.dir_util", "line_number": 110, "usage_type": "attribute"}, {"api_name": "distutils.dir_util", "line_number": 110, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 110, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 110, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 110, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 114, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 114, "usage_type": "name"}, {"api_name": "os.path.path.isfile", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 115, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 115, "usage_type": "name"}, {"api_name": "xivo_fetchfw.fetchfw.FirmwareInstallationError", "line_number": 220, "usage_type": "call"}, {"api_name": "xivo_fetchfw.fetchfw", "line_number": 220, "usage_type": "name"}, {"api_name": "xivo_fetchfw.fetchfw.register_install_fn", "line_number": 223, "usage_type": "call"}, {"api_name": "xivo_fetchfw.fetchfw", "line_number": 223, "usage_type": "name"}, {"api_name": "xivo_fetchfw.fetchfw.register_install_fn", "line_number": 224, "usage_type": "call"}, {"api_name": "xivo_fetchfw.fetchfw", "line_number": 224, "usage_type": "name"}]} +{"seq_id": "74062387934", "text": "\"\"\"Returns a hello world string!\"\"\"\r\nimport flask\r\nimport metabackend\r\nfrom flask import request\r\nfrom metabackend.api.ai_model import ai_response \r\nfrom metabackend.api.db import find_match, update_conversation_with_profile, find_real_conversation, update_real_conversation, find_practice_conversation, update_practice_conversation, insert_practice_conversation\r\nimport time\r\nimport json\r\nfrom bson import json_util\r\n\r\n@metabackend.app.route('/api/v1/real-conversation//', methods=['GET'])\r\ndef get_real_conversation(real_conversation_id):\r\n real_conversation = find_real_conversation(real_conversation_id, None, None)\r\n match_id = str(real_conversation['match_id'])\r\n match = find_match(match_id)\r\n match_name = match['name']\r\n context = {\r\n 'realConversation': json.loads(json_util.dumps(real_conversation)),\r\n 'matchName': match_name\r\n }\r\n return flask.jsonify(**context)\r\n\r\n\r\n@metabackend.app.route('/api/v1/real-conversation//message/', methods=['POST'])\r\ndef add_message_to_real_conversation(real_conversation_id):\r\n data = request.get_json()\r\n message = data[\"message\"]\r\n is_user = data[\"is_user\"]\r\n\r\n real_conversation = find_real_conversation(real_conversation_id, None, None)\r\n # TODO: Error handling if conversation does not exist\r\n\r\n new_message = {'text': message, 'is_user': is_user}\r\n new_messages = real_conversation['messages']\r\n if new_messages is None:\r\n new_messages = 
[]\r\n new_messages.append(new_message)\r\n update_real_conversation(real_conversation_id, new_messages)\r\n \r\n context = {\r\n 'success': True\r\n }\r\n return flask.jsonify(**context)\r\n\r\n\r\n# TODO: Test all routes and add practice conversation routes\r\n\r\n@metabackend.app.route('/api/v1/practice-conversation/<practice_conversation_id>/', methods=['GET'])\r\ndef get_practice_conversation(practice_conversation_id):\r\n practice_conversation = find_practice_conversation(practice_conversation_id)\r\n match_id = str(practice_conversation['match_id'])\r\n match = find_match(match_id)\r\n match_name = match['name']\r\n context = {\r\n 'practiceConversation': json.loads(json_util.dumps(practice_conversation)),\r\n 'matchName': match_name\r\n }\r\n return flask.jsonify(**context)\r\n\r\n@metabackend.app.route('/api/v1/practice-conversation/<practice_conversation_id>/message/', methods=['POST'])\r\ndef add_message_to_practice_conversation(practice_conversation_id):\r\n data = request.get_json()\r\n message = data[\"message\"]\r\n\r\n practice_conversation = find_practice_conversation(practice_conversation_id)\r\n new_message = {'text': message, 'is_user': True}\r\n new_messages = practice_conversation['messages']\r\n if new_messages is None:\r\n new_messages = []\r\n new_messages.append(new_message)\r\n \r\n match_id = str(practice_conversation['match_id'])\r\n match = find_match(match_id)\r\n aiMessage = ai_response(new_messages, match)\r\n aiMessageObject = {'text': aiMessage, 'is_user': False}\r\n new_messages.append(aiMessageObject)\r\n update_practice_conversation(practice_conversation_id, new_messages)\r\n \r\n context = {\r\n 'success': True,\r\n 'aiMessage': aiMessage\r\n }\r\n return flask.jsonify(**context)\r\n\r\n@metabackend.app.route('/api/v1/practice-conversation/', methods=['POST'])\r\ndef create_practice_conversation():\r\n data = request.get_json()\r\n user_id = data[\"userId\"]\r\n match_id = data[\"matchId\"]\r\n\r\n real_conversation = find_real_conversation(None, user_id, match_id)\r\n # print(real_conversation)\r\n messages = real_conversation['messages']\r\n if messages is None:\r\n messages = []\r\n number_of_messages_in_real_conversation = len(messages)\r\n practice_conversation = insert_practice_conversation(match_id, user_id, messages, number_of_messages_in_real_conversation)\r\n practice_conversation_id = str(practice_conversation.inserted_id)\r\n\r\n context = {\r\n 'practiceConversationId': practice_conversation_id\r\n }\r\n return flask.jsonify(**context)\r\n\r\n'''\r\n@metabackend.app.route('/api/v1/getmsg', methods=['POST'])\r\ndef respond_to_message_frontend():\r\n data = request.get_json()\r\n user_message = data[\"userMessage\"]\r\n match_id = data[\"matchId\"]\r\n user_id = data[\"userId\"]\r\n match = find_match(match_id)\r\n\r\n messages = []\r\n\r\n if \"messages\" in match:\r\n messages = match[\"messages\"]\r\n \r\n messages.append(user_message)\r\n profile_message_response = ai_response(messages, match)\r\n messages.append(profile_message_response)\r\n \r\n update_conversation_with_profile(match_id, messages)\r\n\r\n context = {\r\n 'apiMessage': profile_message_response\r\n }\r\n\r\n# profile = {\r\n# 'name': \"Jayce\",\r\n# 'age': '24',\r\n# 'gender': \"male\",\r\n# 'interests': \"Metal, sushi, astrology, space, music\"\r\n# }\r\n\r\n return flask.jsonify(**context)\r\n'''", "repo_name": "JasInCase/Metadating", "sub_path": "metabackend/api/message.py", "file_name": "message.py", "file_ext": "py", "file_size_in_byte": 4945, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, 
"dataset": "github-code", "pt": "33", "api": [{"api_name": "metabackend.api.db.find_real_conversation", "line_number": 13, "usage_type": "call"}, {"api_name": "metabackend.api.db.find_match", "line_number": 15, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 18, "usage_type": "call"}, {"api_name": "bson.json_util.dumps", "line_number": 18, "usage_type": "call"}, {"api_name": "bson.json_util", "line_number": 18, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 21, "usage_type": "call"}, {"api_name": "metabackend.app.route", "line_number": 11, "usage_type": "call"}, {"api_name": "metabackend.app", "line_number": 11, "usage_type": "attribute"}, {"api_name": "flask.request.get_json", "line_number": 26, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 26, "usage_type": "name"}, {"api_name": "metabackend.api.db.find_real_conversation", "line_number": 30, "usage_type": "call"}, {"api_name": "metabackend.api.db.update_real_conversation", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 43, "usage_type": "call"}, {"api_name": "metabackend.app.route", "line_number": 24, "usage_type": "call"}, {"api_name": "metabackend.app", "line_number": 24, "usage_type": "attribute"}, {"api_name": "metabackend.api.db.find_practice_conversation", "line_number": 50, "usage_type": "call"}, {"api_name": "metabackend.api.db.find_match", "line_number": 52, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 55, "usage_type": "call"}, {"api_name": "bson.json_util.dumps", "line_number": 55, "usage_type": "call"}, {"api_name": "bson.json_util", "line_number": 55, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 58, "usage_type": "call"}, {"api_name": "metabackend.app.route", "line_number": 48, "usage_type": "call"}, {"api_name": "metabackend.app", "line_number": 48, "usage_type": "attribute"}, {"api_name": "flask.request.get_json", "line_number": 62, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 62, "usage_type": "name"}, {"api_name": "metabackend.api.db.find_practice_conversation", "line_number": 65, "usage_type": "call"}, {"api_name": "metabackend.api.db.find_match", "line_number": 73, "usage_type": "call"}, {"api_name": "metabackend.api.ai_model.ai_response", "line_number": 74, "usage_type": "call"}, {"api_name": "metabackend.api.db.update_practice_conversation", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 83, "usage_type": "call"}, {"api_name": "metabackend.app.route", "line_number": 60, "usage_type": "call"}, {"api_name": "metabackend.app", "line_number": 60, "usage_type": "attribute"}, {"api_name": "flask.request.get_json", "line_number": 87, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 87, "usage_type": "name"}, {"api_name": "metabackend.api.db.find_real_conversation", "line_number": 91, "usage_type": "call"}, {"api_name": "metabackend.api.db.insert_practice_conversation", "line_number": 97, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 103, "usage_type": "call"}, {"api_name": "metabackend.app.route", "line_number": 85, "usage_type": "call"}, {"api_name": "metabackend.app", "line_number": 85, "usage_type": "attribute"}]} +{"seq_id": "38065321828", "text": "# noinspection PyProtectedMember\nimport pytest\n\nfrom ssanic.http.response.status_line import (\n StatusLine,\n _get_reason_phrase_by_code\n)\n\n\n@pytest.mark.parametrize('status_code,expected', [\n (200, 'OK'),\n (400, 
'Bad_Request'),\n (403, 'Forbidden'),\n (404, 'Not Found'),\n (405, 'Method Not Allowed'),\n (0, '')\n])\ndef test_get_reason_phrase_by_code(status_code, expected):\n assert expected == _get_reason_phrase_by_code(status_code)\n\n\n@pytest.mark.parametrize('status_code,expected', [\n (200, ('HTTP/1.1 200 OK', 'STATUS LINE: HTTP/1.1 200 OK')),\n])\ndef test_status_line_constructor(status_code, expected):\n expected_str, expected_repr = expected\n\n sl = StatusLine(status_code)\n\n assert expected_str == str(sl)\n assert expected_repr == repr(sl)\n", "repo_name": "jpyatachkov/ssanic", "sub_path": "tests/http/response/test_status_line.py", "file_name": "test_status_line.py", "file_ext": "py", "file_size_in_byte": 785, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "33", "api": [{"api_name": "ssanic.http.response.status_line._get_reason_phrase_by_code", "line_number": 19, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 10, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 10, "usage_type": "attribute"}, {"api_name": "ssanic.http.response.status_line.StatusLine", "line_number": 28, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 22, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 22, "usage_type": "attribute"}]} +{"seq_id": "19749517926", "text": "import urllib2\nimport json\nfrom firebase import firebase\nfrom locationBasedAnalysis import getLocations\nfrom nearestDoctor import getNearestDoctor\n\"\"\"\nCalls HealthService API to find the disease analysis based on symptom ids passed in\nAlso gets the mapped disease description from the API data\n\"\"\"\n\n\nclass UrlBuilder:\n def __init__(self, base):\n if base[-1] != '?':\n raise ValueError(\"Base must end with question mark.\")\n if \"http://\" not in base and \"https://\" not in base:\n raise ValueError(\"http:// not in base\")\n self.base = base\n self.params = []\n self.paramstr = \"\"\n\n def addParam(self, opt, val):\n self.params.append([opt, val])\n\n def getURL(self):\n self.paramstr = \"\"\n for paramset in self.params:\n self.paramstr += str(\"=\".join([str(p) for p in paramset]))\n self.paramstr += \"&\"\n return self.base + self.paramstr\n\n def getBaseURL(self):\n return self.base\n\n def setParam(self, opt, newVal):\n self.params[self.params.index(opt)][1] = newVal\n\n def getParams(self):\n return self.params\n\n\ndef getPotentialDiseasesFromIds(ids, number):\n print(ids)\n\n # Initialize firebase and get User data\n fb = firebase.FirebaseApplication(\"https://medicai-4e398.firebaseio.com/\", None)\n data = fb.get(\"/Users\", None)\n\n # Set up URL for API calling\n URL = \"https://sandbox-healthservice.priaid.ch/diagnosis?\"\n gender = data[number][\"gender\"]\n year_of_birth = str(2017 - int(data[number][\"age\"]))\n # Need to generate new token every 24 hours. 
Gonna be a pain.\n token = data[\"token\"]\n language = \"en-gb\"\n symptoms = ids\n\n base = UrlBuilder(URL)\n base.addParam(\"symptoms\", \"[\" + ','.join(symptoms) + \"]\")\n base.addParam(\"gender\", gender)\n base.addParam(\"year_of_birth\", year_of_birth)\n base.addParam(\"token\", token)\n base.addParam(\"language\", language)\n\n # Get data from URL\n req = urllib2.Request(base.getURL())\n data = urllib2.urlopen(req).read()\n respjson = json.loads(data.decode(\"utf-8\"))\n\n finalData = \"\"\n counter = 0\n\n # Parse through JSON and get Disease Data + Description\n for i in range(0, len(respjson)):\n if counter == 1:\n break\n finalData += \"Name of disease: \" + str(respjson[i]['Issue']['ProfName']) + \"\\n\\n\"\n finalData += \"Likelihood: \" + str(round(int(respjson[i]['Issue']['Accuracy']), 3)) + \"%\\n\\n\"\n if getLocations(str(respjson[i]['Issue']['ProfName']), number) > 4:\n finalData += \"Warning! We've detected a high number of \" + str(\n respjson[i]['Issue']['ProfName']) + \" cases in your locality (\" + str(\n getLocations(str(respjson[i]['Issue']['ProfName']),\n number)) + \") making the likelihood of this disease much higher.\" + \"\\n\\n\"\n with open('details.json') as data_file:\n data = json.load(data_file)\n finalData += data[str(respjson[i][\"Issue\"][\"ID\"])][\"TreatmentDescription\"] + \"\\n\"\n finalData += \"\\n\" + getNearestDoctor(number)\n counter += 1\n\n return finalData\n", "repo_name": "TheCurryMan/MedicAI", "sub_path": "DiseaseFinder.py", "file_name": "DiseaseFinder.py", "file_ext": "py", "file_size_in_byte": 3149, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "33", "api": [{"api_name": "firebase.firebase.FirebaseApplication", "line_number": 46, "usage_type": "call"}, {"api_name": "firebase.firebase", "line_number": 46, "usage_type": "name"}, {"api_name": "urllib2.Request", "line_number": 66, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 67, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 68, "usage_type": "call"}, {"api_name": "locationBasedAnalysis.getLocations", "line_number": 79, "usage_type": "call"}, {"api_name": "locationBasedAnalysis.getLocations", "line_number": 82, "usage_type": "call"}, {"api_name": "json.load", "line_number": 85, "usage_type": "call"}, {"api_name": "nearestDoctor.getNearestDoctor", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "70262887454", "text": "\"\"\"\nProject Name: Ant Simulator\nFile Name: Combat.py\nAuthor: Lex Hall\nLast Updated: August 2nd, 2018\nPython Version: 2.7\nPygame Version: 1.9.1.win32-py2.7\n\"\"\"\n\nimport sys\nimport pygame\nfrom random import *\nimport Constants as const\n\n\n\n'''\nUsed to run a Combat situation includes information for UI display as well as logic to conclude the combat phase\n'''\nclass CombatController (object):\n def __init__(self, screen):\n self.screen = screen\n self.rect = pygame.Rect(0, 0, const.WIDTH, const.HEIGHT)\n self.textFont = pygame.font.Font(\"pixelplay.ttf\", 50)\n self.antDice = 1\n self.enemyDice = 1\n self.enemyStrength = -1\n self.enemyHealth = -1\n self.antCount = -1\n\n\n ### takes an image file name and runs it til the user clicks\n def runCombatLoop(self, enemyStrength, antCount, antType):\n pygame.mixer.music.pause()\n background = pygame.image.load(const.COMBATWIREFRAME).convert_alpha()\n done = False\n self.antDice = 1\n self.enemyDice = 1\n self.enemyStrength = enemyStrength\n self.enemyHealth = enemyStrength * 100\n self.antCount = 
antCount\n clock = pygame.time.Clock()\n frameRate = 60\n frameCount = 0\n currentStage = 1\n nextAnimationUpdate = 4\n nextStageUpdate = 60\n while not done:\n # Get event from pygame\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n pygame.quit()\n sys.exit()\n\n # Update\n if (frameCount == nextAnimationUpdate and currentStage == 1):\n self.antDice = randint(1,6)\n self.enemyDice = randint(1,6)\n nextAnimationUpdate += 4\n\n else:\n if self.antCount < 0 or self.enemyHealth < 0:\n #TODO add animation\n pygame.time.wait(5000)\n done = True\n\n if (frameCount == nextStageUpdate):\n if currentStage == 1:\n antDice = randint(1,6)\n enemyDice = randint(1,6)\n if self.antDice > self.enemyDice:\n self.enemyHealth -= (antDice * self.antCount)\n else:\n self.antCount -= (self.enemyStrength * self.enemyDice)\n currentStage += 1\n if currentStage == 6:\n currentStage = 1\n\n # Draw\n self.screen.blit(background, self.rect)\n self.screen.blit(self.textFont.render(str(self.antCount), True, const.BLACK), const.ANTCOUNTBOX)\n self.screen.blit(self.textFont.render(str(self.enemyHealth), True, const.BLACK), const.ENEMYHEALTHBOX)\n if currentStage == 1:\n self.screen.blit(self.textFont.render(str(self.antDice), True, const.BLACK), const.ANTDICEBOX)\n self.screen.blit(self.textFont.render(str(self.enemyDice), True, const.BLACK), const.ENEMYDICEBOX)\n else:\n if self.antDice > self.enemyDice:\n self.screen.blit(self.textFont.render(str(self.antDice), True, const.RED), const.ANTDICEBOX)\n self.screen.blit(self.textFont.render(str(self.enemyDice), True, const.BLACK), const.ENEMYDICEBOX)\n else:\n self.screen.blit(self.textFont.render(str(self.antDice), True, const.BLACK), const.ANTDICEBOX)\n self.screen.blit(self.textFont.render(str(self.enemyDice), True, const.RED), const.ENEMYDICEBOX)\n\n # Increment Frames/Ticks\n frameCount += 1\n\n # Reset frames and seconds every 30 frames to avoid numbers becoming too large\n if (frameCount == 61):\n frameCount = 1\n nextAnimationUpdate = 4\n\n # Throttle frame rate\n clock.tick(frameRate)\n\n # Flip Display\n pygame.display.flip()\n \n return self.enemyHealth < 0\n\n def getAntCountAfterCombat(self):\n return self.antCount\n\n\"\"\"\nCombat starts\nLoad image for background\nRoll Dice Animation\nCalculate 2 \"Dice\" (Rand 1-6)\nmake changes to totals\ndisplay results for 5 seconds\nrepeat steps 3-6 until ants are dead or enemy is dead\nupdate deaths with UI\n\"\"\"", "repo_name": "AnubisAbydos/Ant-Simulator", "sub_path": "Ant-Simulator/Ant-Simulator/Combat.py", "file_name": "Combat.py", "file_ext": "py", "file_size_in_byte": 4350, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "33", "api": [{"api_name": "pygame.Rect", "line_number": 23, "usage_type": "call"}, {"api_name": "Constants.WIDTH", "line_number": 23, "usage_type": "attribute"}, {"api_name": "Constants.HEIGHT", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.pause", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 35, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 35, "usage_type": "attribute"}, {"api_name": "Constants.COMBATWIREFRAME", "line_number": 35, "usage_type": "attribute"}, {"api_name": 
"pygame.time.Clock", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 50, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 51, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 53, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 54, "usage_type": "call"}, {"api_name": "pygame.time.wait", "line_number": 65, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 65, "usage_type": "attribute"}, {"api_name": "Constants.BLACK", "line_number": 82, "usage_type": "attribute"}, {"api_name": "Constants.ANTCOUNTBOX", "line_number": 82, "usage_type": "attribute"}, {"api_name": "Constants.BLACK", "line_number": 83, "usage_type": "attribute"}, {"api_name": "Constants.ENEMYHEALTHBOX", "line_number": 83, "usage_type": "attribute"}, {"api_name": "Constants.BLACK", "line_number": 85, "usage_type": "attribute"}, {"api_name": "Constants.ANTDICEBOX", "line_number": 85, "usage_type": "attribute"}, {"api_name": "Constants.BLACK", "line_number": 86, "usage_type": "attribute"}, {"api_name": "Constants.ENEMYDICEBOX", "line_number": 86, "usage_type": "attribute"}, {"api_name": "Constants.RED", "line_number": 89, "usage_type": "attribute"}, {"api_name": "Constants.ANTDICEBOX", "line_number": 89, "usage_type": "attribute"}, {"api_name": "Constants.BLACK", "line_number": 90, "usage_type": "attribute"}, {"api_name": "Constants.ENEMYDICEBOX", "line_number": 90, "usage_type": "attribute"}, {"api_name": "Constants.BLACK", "line_number": 92, "usage_type": "attribute"}, {"api_name": "Constants.ANTDICEBOX", "line_number": 92, "usage_type": "attribute"}, {"api_name": "Constants.RED", "line_number": 93, "usage_type": "attribute"}, {"api_name": "Constants.ENEMYDICEBOX", "line_number": 93, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 107, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 107, "usage_type": "attribute"}]} +{"seq_id": "22989593427", "text": "import os\nimport time\nimport cv2\nimport numpy as np\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.data\nimport torch.multiprocessing as mp\nfrom model.Lite_FENet import Lite_FENet\nfrom util import dataset, transform, config\nfrom util.util import AverageMeter, poly_learning_rate, intersectionAndUnionGPU, check_makedirs, get_logger, set_seed, \\\n close_gradient, get_optim, get_model_para_number\n\ncv2.ocl.setUseOpenCL(False)\ncv2.setNumThreads(0)\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(description='PyTorch Semantic Segmentation')\n parser.add_argument('--config', type=str, default='config/pascal/pascal_split0_resnet50.yaml', help='config file path')\n parser.add_argument('opts', help='see config/ade20k/ade20k_pspnet50.yaml for all options', default=None, nargs=argparse.REMAINDER)\n args = parser.parse_args()\n assert args.config is not None\n cfg = config.load_cfg_from_cfg_file(args.config)\n if args.opts is not None:\n cfg = config.merge_cfg_from_list(cfg, args.opts)\n return cfg\n\n\ndef main_process():\n return (not args.multiprocessing_distributed) or (args.multiprocessing_distributed and args.rank % args.ngpus_per_node == 0)\n\n\ndef main():\n args = get_parser()\n assert args.classes > 1\n assert args.zoom_factor in [1, 2, 4, 8]\n assert (args.train_h - 1) % 8 == 0 and (args.train_w - 1) 
% 8 == 0\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = ','.join(str(x) for x in args.train_gpu)\n\n if args.manual_seed is not None:\n set_seed(args.manual_seed, deterministic=False)\n\n # multi-processing training is deprecated\n if args.dist_url == \"env://\" and args.world_size == -1:\n args.world_size = int(os.environ[\"WORLD_SIZE\"])\n args.distributed = args.world_size > 1 or args.multiprocessing_distributed\n args.ngpus_per_node = len(args.train_gpu)\n if len(args.train_gpu) == 1:\n args.sync_bn = False # sync_bn is deprecated\n args.distributed = False\n args.multiprocessing_distributed = False\n if args.multiprocessing_distributed:\n args.world_size = args.ngpus_per_node * args.world_size\n mp.spawn(main_worker, nprocs=args.ngpus_per_node, args=(args.ngpus_per_node, args))\n else:\n main_worker(args)\n\n\ndef main_worker(argss):\n global args, logger\n args = argss\n check_makedirs(args.save_path)\n logger = get_logger(args.save_path + '/train-{}shot.log'.format(args.shot))\n\n model = Lite_FENet(layers=args.layers, classes=2, zoom_factor=8,\n criterion=nn.CrossEntropyLoss(ignore_index=255), BatchNorm=nn.BatchNorm2d,\n backbone_pretrained=True, shot=args.shot, scales=args.scales, vgg=args.vgg)\n\n logger.info(\"=> Creating model ...\")\n print(args)\n\n optimizer = get_optim(model, args)\n\n # freeze backbone\n close_gradient(model)\n\n model = model.cuda()\n if args.resume:\n if os.path.isfile(args.resume):\n logger.info(\"=> Loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume, map_location=lambda storage, loc: storage.cuda())\n args.start_epoch = checkpoint['epoch']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n logger.info(\"=> loaded checkpoint '{}' (epoch {})\".format(args.resume, checkpoint['epoch']))\n else:\n logger.info(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n # get model params number\n total_number, learnable_number = get_model_para_number(model)\n logger.info('Number of Parameters: {}'.format(total_number))\n logger.info('Number of Learnable Parameters: {}'.format(learnable_number))\n\n # ---------------------- DATASET ----------------------\n value_scale = 255\n mean = [0.485, 0.456, 0.406]\n mean = [item * value_scale for item in mean]\n std = [0.229, 0.224, 0.225]\n std = [item * value_scale for item in std]\n assert args.split in [0, 1, 2, 3, 999]\n # Train\n train_transform = transform.Compose([\n transform.RandScale([args.scale_min, args.scale_max]),\n transform.RandRotate([args.rotate_min, args.rotate_max], padding=mean, ignore_label=args.padding_label),\n transform.RandomGaussianBlur(),\n transform.RandomHorizontalFlip(),\n transform.Crop([args.train_h, args.train_w], crop_type='rand', padding=mean, ignore_label=args.padding_label),\n transform.ToTensor(),\n transform.Normalize(mean=mean, std=std)])\n\n train_data = dataset.SemData(split=args.split, shot=args.shot, data_root=args.data_root, data_list=args.train_list,\n transform=train_transform, mode='train',\n use_coco=args.use_coco, use_split_coco=args.use_split_coco)\n\n train_sampler = None\n train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=(train_sampler is None),\n num_workers=args.workers, pin_memory=True, sampler=train_sampler,\n drop_last=True)\n # Val\n if args.evaluate:\n if args.resized_val:\n val_transform = transform.Compose([\n transform.Resize(size=args.val_size),\n transform.ToTensor(),\n transform.Normalize(mean=mean, std=std)])\n else:\n 
val_transform = transform.Compose([\n transform.test_Resize(size=args.val_size),\n transform.ToTensor(),\n transform.Normalize(mean=mean, std=std)])\n\n val_data = dataset.SemData(split=args.split, shot=args.shot, data_root=args.data_root, data_list=args.val_list,\n transform=val_transform, mode='val',\n use_coco=args.use_coco, use_split_coco=args.use_split_coco)\n\n val_loader = torch.utils.data.DataLoader(val_data, batch_size=args.batch_size_val, shuffle=False,\n num_workers=args.workers, pin_memory=True, sampler=None)\n\n max_iou = 0.\n filename = 'Lite-FENet.pth'\n pthname = 'epoch_0.pth'\n\n for epoch in range(args.start_epoch, args.epochs):\n epoch_log = epoch + 1\n\n if args.fix_random_seed_val:\n set_seed(args.manual_seed + epoch, deterministic=False)\n\n # ---------------------- TRAIN ----------------------\n loss_train, mIoU_train, mAcc_train, allAcc_train = train(train_loader, model, optimizer, epoch)\n\n # save model \n if (epoch % args.save_freq == 0) and (epoch > 0):\n if os.path.exists(pthname):\n os.remove(pthname)\n pthname = args.save_path + '/epoch_' + str(epoch) + '.pth'\n logger.info('Saving checkpoint to: ' + pthname)\n torch.save({'epoch': epoch, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}, pthname)\n\n # ----------------------- VAL -----------------------\n if args.evaluate and (epoch % 1 == 0 or (args.epochs <= 50 and epoch % 1 == 0)):\n\n loss_val, mIoU_val, mAcc_val, allAcc_val, class_miou = validate(val_loader, model)\n # save the best model \n if class_miou > max_iou:\n max_iou = class_miou\n if os.path.exists(filename):\n os.remove(filename)\n filename = args.save_path + '/train_epoch_' + str(epoch) + '_' + str(round(max_iou, 4)) + '.pth'\n logger.info('Saving checkpoint to: ' + filename)\n torch.save({'epoch': epoch, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()},\n filename)\n\n\ndef train(train_loader, model, optimizer, epoch):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n main_loss_meter = AverageMeter()\n aux_loss_meter = AverageMeter()\n loss_meter = AverageMeter()\n intersection_meter = AverageMeter()\n union_meter = AverageMeter()\n target_meter = AverageMeter()\n\n model.train()\n end = time.time()\n max_iter = args.epochs * len(train_loader)\n print('Warmup: {}'.format(args.warmup))\n\n for i, (input, target, s_input, s_mask, subcls) in enumerate(train_loader):\n data_time.update(time.time() - end)\n current_iter = epoch * len(train_loader) + i + 1\n index_split = -1\n if args.base_lr > 1e-6:\n poly_learning_rate(optimizer, args.base_lr, current_iter, max_iter, power=args.power,\n index_split=index_split, warmup=args.warmup, warmup_step=len(train_loader) // 2)\n\n s_input = s_input.cuda(non_blocking=True)\n s_mask = s_mask.cuda(non_blocking=True)\n input = input.cuda(non_blocking=True)\n target = target.cuda(non_blocking=True)\n\n output, main_loss, aux_loss = model(s_x=s_input, s_y=s_mask, x=input, y=target)\n loss = main_loss + args.aux_weight * aux_loss\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n n = input.size(0) # batch size\n\n intersection, union, target = intersectionAndUnionGPU(output, target, args.classes, args.ignore_label)\n intersection, union, target = intersection.cpu().numpy(), union.cpu().numpy(), target.cpu().numpy()\n intersection_meter.update(intersection), union_meter.update(union), target_meter.update(target)\n\n accuracy = sum(intersection_meter.val) / (sum(target_meter.val) + 1e-10)\n main_loss_meter.update(main_loss.item(), n)\n 
aux_loss_meter.update(aux_loss.item(), n)\n loss_meter.update(loss.item(), n)\n batch_time.update(time.time() - end)\n end = time.time()\n\n remain_iter = max_iter - current_iter\n remain_time = remain_iter * batch_time.avg\n t_m, t_s = divmod(remain_time, 60)\n t_h, t_m = divmod(t_m, 60)\n remain_time = 'Remain_time {:02d}:{:02d}:{:02d}'.format(int(t_h), int(t_m), int(t_s))\n\n if (i + 1) % args.print_freq == 0 and main_process():\n logger.info('Epoch: [{}/{}][{}/{}] '\n 'Data {data_time.val:.3f} ({data_time.avg:.3f}) '\n 'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}) '\n 'Remain {remain_time} '\n 'MainLoss {main_loss_meter.val:.4f} '\n 'AuxLoss {aux_loss_meter.val:.4f} '\n 'Loss {loss_meter.val:.4f} '\n 'Accuracy {accuracy:.4f}.'.format(epoch + 1, args.epochs, i + 1, len(train_loader),\n batch_time=batch_time,\n data_time=data_time,\n remain_time=remain_time,\n main_loss_meter=main_loss_meter,\n aux_loss_meter=aux_loss_meter,\n loss_meter=loss_meter,\n accuracy=accuracy))\n\n iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)\n accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)\n mIoU = np.mean(iou_class) # FB-IoU\n mAcc = np.mean(accuracy_class)\n allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)\n logger.info('Train result at epoch [{}/{}]: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(epoch, args.epochs, mIoU,\n mAcc, allAcc))\n for i in range(args.classes):\n logger.info('Class_{} Result: iou/accuracy {:.4f}/{:.4f}.'.format(i, iou_class[i], accuracy_class[i]))\n\n return main_loss_meter.avg, mIoU, mAcc, allAcc\n\n\ndef validate(val_loader, model):\n logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')\n batch_time = AverageMeter()\n model_time = AverageMeter()\n data_time = AverageMeter()\n loss_meter = AverageMeter()\n intersection_meter = AverageMeter()\n union_meter = AverageMeter()\n target_meter = AverageMeter()\n if args.use_coco:\n split_gap = 20\n test_num = 20000\n else:\n split_gap = 5\n test_num = 2000\n\n class_intersection_meter = [0] * split_gap\n class_union_meter = [0] * split_gap\n if args.manual_seed is not None and args.fix_random_seed_val:\n set_seed(args.manual_seed, False)\n\n criterion = nn.CrossEntropyLoss(ignore_index=args.ignore_label)\n model.eval()\n\n end = time.time()\n assert test_num % args.batch_size_val == 0\n iter_num = 0\n total_time = 0\n\n for e in range(10):\n for i, (input, target, s_input, s_mask, subcls, ori_label) in enumerate(val_loader):\n if iter_num * args.batch_size_val >= test_num:\n break\n iter_num += 1\n data_time.update(time.time() - end)\n\n input = input.cuda(non_blocking=True)\n target = target.cuda(non_blocking=True)\n s_input = s_input.cuda(non_blocking=True)\n s_mask = s_mask.cuda(non_blocking=True)\n ori_label = ori_label.cuda(non_blocking=True)\n\n start_time = time.time()\n with torch.no_grad():\n output = model(s_x=s_input, s_y=s_mask, x=input, y=target)\n total_time = total_time + 1\n model_time.update(time.time() - start_time)\n\n if args.ori_resize:\n longerside = max(ori_label.size(1), ori_label.size(2))\n backmask = torch.ones(ori_label.size(0), longerside, longerside).cuda() * 255\n backmask[0, :ori_label.size(1), :ori_label.size(2)] = ori_label\n target = backmask.clone().long()\n\n output = F.interpolate(output, size=target.size()[1:], mode='bilinear', align_corners=True)\n loss = criterion(output, target)\n\n n = input.size(0)\n loss = torch.mean(loss)\n output = output.max(1)[1]\n\n intersection, union, new_target = 
intersectionAndUnionGPU(output, target, args.classes, args.ignore_label)\n intersection, union, target, new_target = intersection.cpu().numpy(), union.cpu().numpy(), target.cpu().numpy(), new_target.cpu().numpy()\n intersection_meter.update(intersection), union_meter.update(union), target_meter.update(new_target)\n\n subcls = subcls[0].cpu().numpy()[0]\n class_intersection_meter[subcls] += intersection[1]\n class_union_meter[subcls] += union[1]\n\n accuracy = sum(intersection_meter.val) / (sum(target_meter.val) + 1e-10)\n loss_meter.update(loss.item(), input.size(0))\n batch_time.update(time.time() - end)\n end = time.time()\n if ((i + 1) % round((test_num / 100)) == 0) and main_process():\n logger.info('Test: [{}/{}] '\n 'Data {data_time.val:.3f} ({data_time.avg:.3f}) '\n 'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}) '\n 'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f}) '\n 'Accuracy {accuracy:.4f}.'.format(iter_num * args.batch_size_val, test_num,\n data_time=data_time,\n batch_time=batch_time,\n loss_meter=loss_meter,\n accuracy=accuracy))\n\n iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)\n accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)\n mIoU = np.mean(iou_class) # FB-IoU\n mAcc = np.mean(accuracy_class)\n allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)\n\n class_iou_class = []\n class_miou = 0\n for i in range(len(class_intersection_meter)):\n class_iou = class_intersection_meter[i] / (class_union_meter[i] + 1e-10)\n class_iou_class.append(class_iou)\n class_miou += class_iou\n class_miou = class_miou * 1.0 / len(class_intersection_meter)\n logger.info('meanIoU---Val result: mIoU {:.4f}.'.format(class_miou))\n for i in range(split_gap):\n logger.info('Class_{} Result: iou {:.4f}.'.format(i + 1, class_iou_class[i]))\n\n logger.info('FBIoU---Val result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(mIoU, mAcc, allAcc))\n for i in range(args.classes):\n logger.info('Class_{} Result: iou/accuracy {:.4f}/{:.4f}.'.format(i, iou_class[i], accuracy_class[i]))\n logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')\n\n print('avg inference time: {:.4f}, count: {}'.format(model_time.avg, test_num))\n return loss_meter.avg, mIoU, mAcc, allAcc, class_miou\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "Sunbaoquan/Lite-FENet", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 17008, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "33", "api": [{"api_name": "cv2.ocl.setUseOpenCL", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.ocl", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cv2.setNumThreads", "line_number": 17, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 21, "usage_type": "call"}, {"api_name": "argparse.REMAINDER", "line_number": 23, "usage_type": "attribute"}, {"api_name": "util.config.load_cfg_from_cfg_file", "line_number": 26, "usage_type": "call"}, {"api_name": "util.config", "line_number": 26, "usage_type": "name"}, {"api_name": "util.config.merge_cfg_from_list", "line_number": 28, "usage_type": "call"}, {"api_name": "util.config", "line_number": 28, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 42, "usage_type": "attribute"}, {"api_name": "util.util.set_seed", "line_number": 45, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 49, "usage_type": "attribute"}, {"api_name": "torch.multiprocessing.spawn", 
"line_number": 58, "usage_type": "call"}, {"api_name": "torch.multiprocessing", "line_number": 58, "usage_type": "name"}, {"api_name": "util.util.check_makedirs", "line_number": 66, "usage_type": "call"}, {"api_name": "util.util.get_logger", "line_number": 67, "usage_type": "call"}, {"api_name": "model.Lite_FENet", "line_number": 69, "usage_type": "name"}, {"api_name": "model.Lite_FENet.Lite_FENet", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 70, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 70, "usage_type": "attribute"}, {"api_name": "util.util.get_optim", "line_number": 76, "usage_type": "call"}, {"api_name": "model.Lite_FENet", "line_number": 76, "usage_type": "argument"}, {"api_name": "util.util.close_gradient", "line_number": 79, "usage_type": "call"}, {"api_name": "model.Lite_FENet", "line_number": 79, "usage_type": "argument"}, {"api_name": "model.Lite_FENet", "line_number": 81, "usage_type": "name"}, {"api_name": "model.Lite_FENet.cuda", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 85, "usage_type": "call"}, {"api_name": "model.Lite_FENet.load_state_dict", "line_number": 87, "usage_type": "call"}, {"api_name": "model.Lite_FENet", "line_number": 87, "usage_type": "name"}, {"api_name": "util.util.get_model_para_number", "line_number": 94, "usage_type": "call"}, {"api_name": "model.Lite_FENet", "line_number": 94, "usage_type": "argument"}, {"api_name": "util.transform.Compose", "line_number": 106, "usage_type": "call"}, {"api_name": "util.transform", "line_number": 106, "usage_type": "name"}, {"api_name": "util.transform.RandScale", "line_number": 107, "usage_type": "call"}, {"api_name": "util.transform", "line_number": 107, "usage_type": "name"}, {"api_name": "util.transform.RandRotate", "line_number": 108, "usage_type": "call"}, {"api_name": "util.transform", "line_number": 108, "usage_type": "name"}, {"api_name": "util.transform.RandomGaussianBlur", "line_number": 109, "usage_type": "call"}, {"api_name": "util.transform", "line_number": 109, "usage_type": "name"}, {"api_name": "util.transform.RandomHorizontalFlip", "line_number": 110, "usage_type": "call"}, {"api_name": "util.transform", "line_number": 110, "usage_type": "name"}, {"api_name": "util.transform.Crop", "line_number": 111, "usage_type": "call"}, {"api_name": "util.transform", "line_number": 111, "usage_type": "name"}, {"api_name": "util.transform.ToTensor", "line_number": 112, "usage_type": "call"}, {"api_name": "util.transform", "line_number": 112, "usage_type": "name"}, {"api_name": "util.transform.Normalize", "line_number": 113, "usage_type": "call"}, {"api_name": "util.transform", "line_number": 113, "usage_type": "name"}, {"api_name": "util.dataset.SemData", "line_number": 115, "usage_type": "call"}, {"api_name": "util.dataset", "line_number": 115, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 120, "usage_type": "attribute"}, {"api_name": "util.transform.Compose", "line_number": 126, "usage_type": "call"}, {"api_name": "util.transform", "line_number": 126, "usage_type": "name"}, {"api_name": "util.transform.Resize", "line_number": 127, "usage_type": "call"}, {"api_name": 
"util.transform", "line_number": 127, "usage_type": "name"}, {"api_name": "util.transform.ToTensor", "line_number": 128, "usage_type": "call"}, {"api_name": "util.transform", "line_number": 128, "usage_type": "name"}, {"api_name": "util.transform.Normalize", "line_number": 129, "usage_type": "call"}, {"api_name": "util.transform", "line_number": 129, "usage_type": "name"}, {"api_name": "util.transform.Compose", "line_number": 131, "usage_type": "call"}, {"api_name": "util.transform", "line_number": 131, "usage_type": "name"}, {"api_name": "util.transform.test_Resize", "line_number": 132, "usage_type": "call"}, {"api_name": "util.transform", "line_number": 132, "usage_type": "name"}, {"api_name": "util.transform.ToTensor", "line_number": 133, "usage_type": "call"}, {"api_name": "util.transform", "line_number": 133, "usage_type": "name"}, {"api_name": "util.transform.Normalize", "line_number": 134, "usage_type": "call"}, {"api_name": "util.transform", "line_number": 134, "usage_type": "name"}, {"api_name": "util.dataset.SemData", "line_number": 136, "usage_type": "call"}, {"api_name": "util.dataset", "line_number": 136, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 140, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 140, "usage_type": "attribute"}, {"api_name": "util.util.set_seed", "line_number": 151, "usage_type": "call"}, {"api_name": "model.Lite_FENet", "line_number": 154, "usage_type": "argument"}, {"api_name": "os.path.exists", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path", "line_number": 158, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 159, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 162, "usage_type": "call"}, {"api_name": "model.Lite_FENet.state_dict", "line_number": 162, "usage_type": "call"}, {"api_name": "model.Lite_FENet", "line_number": 162, "usage_type": "name"}, {"api_name": "model.Lite_FENet", "line_number": 167, "usage_type": "argument"}, {"api_name": "os.path.exists", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path", "line_number": 171, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 172, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 175, "usage_type": "call"}, {"api_name": "model.Lite_FENet.state_dict", "line_number": 175, "usage_type": "call"}, {"api_name": "model.Lite_FENet", "line_number": 175, "usage_type": "name"}, {"api_name": "util.util.AverageMeter", "line_number": 180, "usage_type": "call"}, {"api_name": "util.util.AverageMeter", "line_number": 181, "usage_type": "call"}, {"api_name": "util.util.AverageMeter", "line_number": 182, "usage_type": "call"}, {"api_name": "util.util.AverageMeter", "line_number": 183, "usage_type": "call"}, {"api_name": "util.util.AverageMeter", "line_number": 184, "usage_type": "call"}, {"api_name": "util.util.AverageMeter", "line_number": 185, "usage_type": "call"}, {"api_name": "util.util.AverageMeter", "line_number": 186, "usage_type": "call"}, {"api_name": "util.util.AverageMeter", "line_number": 187, "usage_type": "call"}, {"api_name": "model.Lite_FENet.train", "line_number": 189, "usage_type": "call"}, {"api_name": "model.Lite_FENet", "line_number": 189, "usage_type": "name"}, {"api_name": "time.time", "line_number": 190, "usage_type": "call"}, {"api_name": "time.time", "line_number": 195, "usage_type": "call"}, {"api_name": "util.util.poly_learning_rate", "line_number": 199, "usage_type": "call"}, {"api_name": "model.Lite_FENet", "line_number": 207, 
"usage_type": "call"}, {"api_name": "util.util.intersectionAndUnionGPU", "line_number": 215, "usage_type": "call"}, {"api_name": "time.time", "line_number": 223, "usage_type": "call"}, {"api_name": "time.time", "line_number": 224, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 251, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 252, "usage_type": "call"}, {"api_name": "util.util.AverageMeter", "line_number": 264, "usage_type": "call"}, {"api_name": "util.util.AverageMeter", "line_number": 265, "usage_type": "call"}, {"api_name": "util.util.AverageMeter", "line_number": 266, "usage_type": "call"}, {"api_name": "util.util.AverageMeter", "line_number": 267, "usage_type": "call"}, {"api_name": "util.util.AverageMeter", "line_number": 268, "usage_type": "call"}, {"api_name": "util.util.AverageMeter", "line_number": 269, "usage_type": "call"}, {"api_name": "util.util.AverageMeter", "line_number": 270, "usage_type": "call"}, {"api_name": "util.util.set_seed", "line_number": 281, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 283, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 283, "usage_type": "name"}, {"api_name": "model.Lite_FENet.eval", "line_number": 284, "usage_type": "call"}, {"api_name": "model.Lite_FENet", "line_number": 284, "usage_type": "name"}, {"api_name": "time.time", "line_number": 286, "usage_type": "call"}, {"api_name": "time.time", "line_number": 296, "usage_type": "call"}, {"api_name": "time.time", "line_number": 304, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 305, "usage_type": "call"}, {"api_name": "model.Lite_FENet", "line_number": 306, "usage_type": "call"}, {"api_name": "time.time", "line_number": 308, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 312, "usage_type": "call"}, {"api_name": "torch.nn.functional.interpolate", "line_number": 316, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 316, "usage_type": "name"}, {"api_name": "torch.mean", "line_number": 320, "usage_type": "call"}, {"api_name": "util.util.intersectionAndUnionGPU", "line_number": 323, "usage_type": "call"}, {"api_name": "time.time", "line_number": 333, "usage_type": "call"}, {"api_name": "time.time", "line_number": 334, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 348, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 349, "usage_type": "call"}]} +{"seq_id": "39486026497", "text": "#!/usr/bin/env python3\n\n\"\"\"\n Removes directories and files that match provided regexes\n \n Examples:\n * remove all entries under the .foo and .bar directories\n %(prog)s data/\\.foo.* data/\\.bar.*\n * remove vim temp file(s)\n %(prog)s data/.*\\.swp\n\"\"\"\n\nimport argparse\nimport fnmatch\nimport hashlib\nimport os\nimport re\nimport shutil\nimport sys\n\nclass MyFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawTextHelpFormatter):\n pass\n\ndef getHashAlgorithm(file):\n hash_algorithm = os.path.splitext(file)[0].split('-')[1]\n return hash_algorithm\n\ndef createModifiedManifest(manifest, regexes, *, dryrun=False):\n hash_algorithm = getHashAlgorithm(manifest)\n original_manifest_file = open(manifest, 'r')\n modified_manifest_name = 'manifest-' + hash_algorithm + '.txt.tmp'\n modified_manifest_file = open(modified_manifest_name, 'w')\n modified = False\n for regex in regexes:\n matcher = re.compile(regex)\n for line in original_manifest_file:\n file_entry = line.split(' ', 1)[1]\n if matcher.search(file_entry):\n 
if dryrun:\n print(\"Would have removed [%s] from manifest because it matches regex [%s]\" % (line.rstrip(), regex))\n else:\n print(\"Removing [%s] from manifest because it matches regex [%s]\" % (line.rstrip(), regex))\n modified = True\n else:\n modified_manifest_file.write(line)\n \n original_manifest_file.close()\n modified_manifest_file.close()\n \n return modified_manifest_name, modified\n\ndef calculateHash(file):\n hash_algorithm = getHashAlgorithm(file)\n file_handle = open(file, 'rb')\n hasher = hashlib.new(hash_algorithm)\n buf = file_handle.read()\n file_handle.close()\n hasher.update(buf)\n hash = hasher.hexdigest()\n \n return hash\n\ndef updateTagmanifest(updated_manifest_hash):\n for file in os.listdir('.'):\n if fnmatch.fnmatch(file, 'tagmanifest-*.txt'):\n hash_algorithm = getHashAlgorithm(file)\n original_tagmanifest = open(file)\n modified_tagmanifest_name = \"tagmanifest-\" + hash_algorithm + \".txt.tmp\"\n modified_tagmanifest = open(modified_tagmanifest_name, \"w\")\n for line in original_tagmanifest:\n if \"manifest\" in line:\n modified_tagmanifest.write(updated_manifest_hash + \" \" + file + os.linesep)\n found_file_to_update = True\n else:\n modified_tagmanifest.write(line)\n \n original_tagmanifest.close()\n modified_tagmanifest.close()\n shutil.move(modified_tagmanifest_name, file)\n\ndef removeMatching(regex, starting_dir, *, dryrun=False):\n for dir_name, sub_dir_list, file_list in os.walk(starting_dir):\n matcher = re.compile(regex)\n if matcher.search(dir_name):\n if dryrun:\n print(\"Would have removed directory[%s] from filesystem cause it matches regex [%s]\"%(dir_name, regex))\n elif os.path.islink(dir_name):\n print(\"Removing link to directory [%s] from filesystem cause it matches regex [%s]\" % (dir_name, regex))\n os.unlink(dir_name)\n else:\n print(\"Removing directory [%s] from filesystem cause it matches regex [%s]\" %(dir_name, regex))\n shutil.rmtree(dir_name, ignore_errors=True)\n else:\n for filename in file_list:\n if matcher.search(filename):\n if dryrun:\n print(\"Would have removed file [%s] from filesystem cause it matches regex [%s]\" % (filename, regex))\n elif os.path.islink(filename):\n print(\"Removing link to file [%s] from filesystem cause it matches regex [%s]\" %(filename, regex))\n os.unlink(filename)\n else:\n print(\"Removing file [%s] from filesystem cause it matches regex [%s]\" % (filename, regex))\n os.remove(os.path.join(dir_name, filename))\n\ndef removeAllMatching(regexes, starting_dir, *, dryrun=False):\n for regex in regexes:\n removeMatching(regex, starting_dir, dryrun=dryrun)\n\ndef main():\n parser = argparse.ArgumentParser(description=__doc__.strip(), formatter_class=MyFormatter)\n parser.add_argument('regexes', help='Python regexes to match against in bag manifest.', nargs='+')\n parser.add_argument(\"-d\", \"--dryrun\", help=\"Don't actually modify bag.\", action=\"store_true\")\n parser.add_argument(\"-r\", \"--remove-matching\", help=\"Also remove (delete) the matching file(s) from the filesystem.\", action=\"store_false\")\n args = parser.parse_args()\n \n found_manifest_file = False\n starting_dir = os.getcwd()\n for file in os.listdir(starting_dir):\n if fnmatch.fnmatch(file, 'manifest-*.txt'):\n found_manifest_file = True\n new_manifest_file, differs = createModifiedManifest(file, args.regexes, dryrun=args.dryrun)\n if differs and not args.dryrun:\n shutil.move(new_manifest_file, file)\n hash = calculateHash(file)\n updateTagmanifest(hash)\n else:\n os.remove(new_manifest_file)\n 
removeAllMatching(args.regexes, starting_dir, dryrun=args.dryrun)\n \n if not found_manifest_file:\n print(\"Could not find any manifest file. Are you in the bag directory?\")\n \nif __name__ == \"__main__\":\n main()\n", "repo_name": "LibraryOfCongress/bagger", "sub_path": "bagger/scripts/baggerLinter.py", "file_name": "baggerLinter.py", "file_ext": "py", "file_size_in_byte": 5559, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 113, "dataset": "github-code", "pt": "33", "api": [{"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 21, "usage_type": "attribute"}, {"api_name": "argparse.RawTextHelpFormatter", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 35, "usage_type": "call"}, {"api_name": "hashlib.new", "line_number": 55, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 64, "usage_type": "call"}, {"api_name": "fnmatch.fnmatch", "line_number": 65, "usage_type": "call"}, {"api_name": "os.linesep", "line_number": 72, "usage_type": "attribute"}, {"api_name": "shutil.move", "line_number": 79, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 82, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path.islink", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "os.unlink", "line_number": 89, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path.islink", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path", "line_number": 98, "usage_type": "attribute"}, {"api_name": "os.unlink", "line_number": 100, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 110, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 117, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 118, "usage_type": "call"}, {"api_name": "fnmatch.fnmatch", "line_number": 119, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 123, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 127, "usage_type": "call"}]} +{"seq_id": "71394368734", "text": "import cv2\nimport numpy as np\nimport os\n\n#CCTV - 1080*960(read by openCV)\n#resized to 534*400(after trimming the time stamp)\n# cv2.namedWindow(\"output\", cv2.WINDOW_NORMAL) \n# cv2.resizeWindow(\"output\", 1920, 1080)\ncap = cv2.VideoCapture('rtsp://admin:admin123@192.168.0.104:554/')\n# cap = cv2.VideoCapture(\"../Friends_ross.mp4\")\n# print(\"after\")\nwhile(True):\n ret, frame = cap.read()\n # frame = frame[:][100:]\n # frame = cv2.resize(frame, (534,400))\n cv2.imshow(\"output\",frame)\n \n print(frame.shape)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n", "repo_name": "Siddharthm10/Face_Recognition_and_detection", "sub_path": "camera_access/access_CCTV.py", "file_name": "access_CCTV.py", "file_ext": "py", "file_size_in_byte": 611, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "cv2.VideoCapture", 
"line_number": 9, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "33865965431", "text": "from transformers import (\n Trainer,\n TrainingArguments,\n AutoModelForSequenceClassification,\n AutoTokenizer\n)\nfrom datasets import load_dataset\nfrom transformers.integrations import MLflowCallback\nimport argparse\nimport os\nimport pandas as pd\nfrom myDataset import myDataset\nimport nltk\n\n\"\"\"\nThis is a training script to be uploaded to Azure.\nIn this version articles that are too long are chopped up into smaller fragments. \nUsing myDataset class here that subclasses torch.utils.data.Dataset.\nShould probably abstract away the data chopping to myDataset to keep this training script about training.\n\"\"\"\n\ndef get_chunks(longlist):\n #you should probably pad the last chunk or maybe just ignore chunks that are way too small\n for i in range(0, len(longlist), 512):\n yield longlist[i:i + 512]\n\n#you should probably pad the last chunk or maybe just ignore chunks that are way too small\ndef chop_article(token_list, tokenizer, token_types, attention_mask):\n token_list=token_list[1:]\n #print(token_list)\n text=tokenizer.decode(token_list)\n #use this to turn text into list of sentences\n text_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\n sentences=text_tokenizer.tokenize(text)\n start_indices=[0]\n global_index=0\n local_count=0\n long_sentence_flag=0\n for i, sentence in enumerate(sentences):\n #print(sentence)\n encoded_sentence=tokenizer.encode(sentence)\n #print(encoded_sentence)\n #print(len(encoded_sentence))\n #print('\\n')\n cur_len=len(encoded_sentence)-2 #note that encode gives a start symbol (101) and end symbol (102)\n local_count=local_count+cur_len\n if local_count>510:\n start_indices.append(global_index)\n local_count=cur_len\n if cur_len>512:\n #print(\"got a super long sentence\")\n #print(sentence)\n long_sentence_flag=1\n break\n global_index=global_index+cur_len\n if long_sentence_flag==0:\n start_indices.append(len(token_list))\n article_chunks=[]\n type_chunks=[]\n mask_chunks=[]\n for i in range(len(start_indices)-1):\n article_chunks.append(token_list[start_indices[i]:start_indices[i+1]])\n article_chunks[i].insert(0, 101)\n article_chunks[i].append(102)\n article_chunks[i].extend([0]*(512-len(article_chunks[i])))\n #print(f\"length of article chunk is {len(article_chunks[i])}\")\n type_chunks.append(token_types[start_indices[i]:start_indices[i+1]])\n type_chunks[i].insert(0, 0)\n type_chunks[i].append(0)\n type_chunks[i].extend([0]*(512-len(type_chunks[i])))\n #print(f\"length of type chunk is {len(type_chunks[i])}\")\n mask_chunks.append(attention_mask[start_indices[i]:start_indices[i+1]])\n mask_chunks[i].insert(0, 1)\n mask_chunks[i].append(1)\n mask_chunks[i].extend([0]*(512-len(mask_chunks[i])))\n #print(f\"length of type chunk is {len(type_chunks[i])}\")\n return article_chunks, type_chunks, mask_chunks\n\n\nif __name__ == \"__main__\":\n nltk.download('punkt')\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--datapath\")\n parser.add_argument(\"--output_dir\")\n args = parser.parse_args()\n\n #initialize the tokenizer and model - using pretrained bert-base-cased - see if there's a more specific fine-tuned model in the model database that applies to our task\n tokenizer = 
AutoTokenizer.from_pretrained(\"bert-base-cased\")\n    model=AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\",num_labels=4)\n\n    #classes=[\"fact\",\"factual analysis\",\"opinion\",\"selective-incomplete\"]\n\n    train_path = os.path.join(args.datapath,\"train.csv\")\n    eval_path = os.path.join(args.datapath,\"eval.csv\")\n\n    train_data = load_dataset('csv', data_files=train_path, split='train')\n    eval_data = load_dataset('csv', data_files=eval_path, split='train')\n\n    X_train = list(train_data['article'])\n    y_train = list(train_data['label'])\n    print(f\"Train Dataset has length:{len(X_train)}\\n\")\n    X_eval = list(eval_data['article'])\n    y_eval = list(eval_data['label'])\n    print(f\"Eval Dataset has length:{len(X_eval)}\\n\")\n\n    #note, turning the tokens into torch tensors in the myDataset class\n    X_train_encoded=tokenizer(X_train,truncation=False, padding='max_length')\n    X_eval_encoded=tokenizer(X_eval,truncation=False, padding='max_length')\n\n    #break up the long articles in train set\n    idx=0\n    while idx<len(X_train_encoded['input_ids']):\n        if len(X_train_encoded['input_ids'][idx])>512:\n            long_text=X_train_encoded['input_ids'][idx]\n            token_types=X_train_encoded['token_type_ids'][idx]\n            attention_mask=X_train_encoded['attention_mask'][idx]\n            article_chunks, type_chunks, mask_chunks=chop_article(long_text,tokenizer,token_types,attention_mask)\n            del X_train_encoded['input_ids'][idx]\n            X_train_encoded['input_ids'][idx:idx]=article_chunks\n            y_label=y_train[idx]\n            y_train[idx:idx]=[y_label]*len(article_chunks)\n            del X_train_encoded['token_type_ids'][idx]\n            X_train_encoded['token_type_ids'][idx:idx]=type_chunks\n            del X_train_encoded['attention_mask'][idx]\n            X_train_encoded['attention_mask'][idx:idx]=mask_chunks\n            idx=idx+len(article_chunks)\n        else:\n            idx=idx+1\n\n    #break up the long articles in eval set\n    idx=0\n    while idx<len(X_eval_encoded['input_ids']):\n        if len(X_eval_encoded['input_ids'][idx])>512:\n            long_text=X_eval_encoded['input_ids'][idx]\n            token_types=X_eval_encoded['token_type_ids'][idx]\n            attention_mask=X_eval_encoded['attention_mask'][idx]\n            article_chunks, type_chunks, mask_chunks=chop_article(long_text,tokenizer,token_types,attention_mask)\n            del X_eval_encoded['input_ids'][idx]\n            X_eval_encoded['input_ids'][idx:idx]=article_chunks\n            y_label=y_eval[idx]\n            y_eval[idx:idx]=[y_label]*len(article_chunks)\n            del X_eval_encoded['token_type_ids'][idx]\n            X_eval_encoded['token_type_ids'][idx:idx]=type_chunks\n            del X_eval_encoded['attention_mask'][idx]\n            X_eval_encoded['attention_mask'][idx:idx]=mask_chunks\n            idx=idx+len(article_chunks)\n        else:\n            idx=idx+1\n\n    train_dataset=myDataset(X_train_encoded,y_train)\n    eval_dataset=myDataset(X_eval_encoded,y_eval)\n\n    print(\"\\nDone loading and formatting datasets.\\n\")\n\n    training_args = TrainingArguments(\n        output_dir=args.output_dir,\n        overwrite_output_dir=True,\n        num_train_epochs=1,\n        learning_rate=1e-5,\n        weight_decay=0.01,\n        per_device_train_batch_size=1,\n        per_device_eval_batch_size=1,\n        evaluation_strategy=\"epoch\", #or change to steps and set eval_steps=int (default=logging_steps)\n        logging_strategy=\"steps\", #or change to steps and set logging_steps=int (default=500)\n        logging_dir='./logs',\n        logging_steps=500,\n        save_strategy=\"epoch\", #or change to steps and set save_steps=int (default=500)\n    )\n\n    print(\"\\nSpecified Training Args.\\n\")\n\n    trainer = Trainer(\n        model=model,\n        args=training_args,\n        train_dataset=train_dataset,\n        eval_dataset=eval_dataset,\n    ) \n\n    print(\"\\nInstantiated Trainer.\\n\")\n\n    #what does this do? 
Look it up\n trainer.pop_callback(MLflowCallback)\n\n print(\"Training!\\n\")\n train_result = trainer.train()\n trainer.save_model()\n print(\"Done!\")", "repo_name": "taircode/newsbias", "sub_path": "code/train_cloud_torchdataset.py", "file_name": "train_cloud_torchdataset.py", "file_ext": "py", "file_size_in_byte": 7933, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "nltk.data.load", "line_number": 33, "usage_type": "call"}, {"api_name": "nltk.data", "line_number": 33, "usage_type": "attribute"}, {"api_name": "nltk.download", "line_number": 81, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 83, "usage_type": "call"}, {"api_name": "transformers.AutoTokenizer.from_pretrained", "line_number": 89, "usage_type": "call"}, {"api_name": "transformers.AutoTokenizer", "line_number": 89, "usage_type": "name"}, {"api_name": "transformers.AutoModelForSequenceClassification.from_pretrained", "line_number": 90, "usage_type": "call"}, {"api_name": "transformers.AutoModelForSequenceClassification", "line_number": 90, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 94, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "datasets.load_dataset", "line_number": 97, "usage_type": "call"}, {"api_name": "datasets.load_dataset", "line_number": 98, "usage_type": "call"}, {"api_name": "myDataset.myDataset", "line_number": 160, "usage_type": "call"}, {"api_name": "myDataset.myDataset", "line_number": 161, "usage_type": "call"}, {"api_name": "transformers.TrainingArguments", "line_number": 165, "usage_type": "call"}, {"api_name": "transformers.Trainer", "line_number": 182, "usage_type": "call"}, {"api_name": "transformers.integrations.MLflowCallback", "line_number": 192, "usage_type": "argument"}]} +{"seq_id": "17304821808", "text": "from telegram import ReplyKeyboardRemove, Update\nfrom telegram.ext import CallbackContext\n\n\nwhite_list = None\n\n\ndef init(permissions):\n global white_list\n white_list = permissions\n\n\ndef start(update: Update, context: CallbackContext):\n user = update.message.from_user\n player = white_list.get_player(user.id)\n if player != -1:\n update.message.reply_text(\n f\"Hi {user.first_name}!\\n{white_list.get_rank_name(player.get_rank())} of Dragonscale castle\", reply_markup=ReplyKeyboardRemove())\n else:\n update.message.reply_text(\n f\"Hi {user.first_name}!\\n{white_list.get_rank_name(0)} of Dragonscale castle\", reply_markup=ReplyKeyboardRemove())\n", "repo_name": "nicholaslopiccolo/dragonscale_bot", "sub_path": "features/start.py", "file_name": "start.py", "file_ext": "py", "file_size_in_byte": 692, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "telegram.Update", "line_number": 13, "usage_type": "name"}, {"api_name": "telegram.ext.CallbackContext", "line_number": 13, "usage_type": "name"}, {"api_name": "telegram.ReplyKeyboardRemove", "line_number": 18, "usage_type": "call"}, {"api_name": "telegram.ReplyKeyboardRemove", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "43560050332", "text": "import numpy as np\nfrom two_stage_clustering import TwoStageClustering\nfrom ClusteringValidationMetrics import *\nfrom sklearn.cluster import KMeans\nfrom sklearn.mixture 
import GaussianMixture\nfrom SOM import SOM\nfrom Utils import *\nfrom sklearn.metrics import davies_bouldin_score, silhouette_score\nfrom time import time\n\n\ndef main_validation(version):\n '''\n Main to obtain the cluster validation measures (silhouette score and DB-index) for the implemented clustering methods\n '''\n\n # data\n data = pd.read_csv(\"Data/zipcodedata_KNN_version_\" + str(version) + \".csv\")\n data_normalised, _, _ = normalise(data)\n X = data_normalised.iloc[:,1:].values # exclude pc4 variable\n\n # parameters\n k_range = np.r_[2:21]\n map_shape = (8, 8)\n\n # measures\n DB_measures = np.zeros((len(k_range), 4)) # rows the k, colums the models (order: TSC-kmeanss, TSC-GMM, kmeans, GMM)\n silhouette_measures = np.zeros((len(k_range), 4)) # rows the k, colums the models\n\n # models\n model_SOM = SOM(X=X, map_shape=map_shape)\n model_SOM.train(print_progress=True)\n W = model_SOM.map # use this to train the kmeans and GMM for the TSC\n for i, k in enumerate(k_range):\n print(\"CURRENT k = %d\" % k)\n models = [TwoStageClustering(X=X, W=W, n_clusters=k, map_shape=map_shape),\n TwoStageClustering(X=X, W=W, n_clusters=k, clus_method=\"gmm\", map_shape=map_shape),\n KMeans(n_clusters=k, random_state=0, algorithm=\"full\", max_iter=5000, n_init=10),\n GaussianMixture(n_components=k, max_iter=5000, n_init=10, init_params=\"random\")]\n\n for j, model in enumerate(models):\n if j < 2: # first two models are two-stage models\n model.train(print_progress=False)\n elif j == 2:\n print(\"Training k-means....\")\n t0 = time()\n model.fit(X)\n print(\"The k-means algorithm took %.3f seconds\" % (time()-t0 ))\n elif j == 3:\n print(\"Training GMM....\")\n t0 = time()\n model.fit(X)\n print(\"The GMM algorithm took %.3f seconds\" % (time()-t0 ))\n\n labels = model.predict(X)\n DB_measures[i,j] = davies_bouldin_score(X, labels)\n silhouette_measures[i, j] = silhouette_score(X, labels)\n print(\"\")\n\n np.savetxt(\"Results/DB_measures.txt\", DB_measures, delimiter=',')\n np.savetxt(\"Results/silhouette_measures.txt\", silhouette_measures, delimiter=',')\n\n\ndef main_visul():\n '''\n Main to visualize the validation measures\n '''\n DB_measures = np.genfromtxt(\"Results/DB_measures.txt\", delimiter=',')\n silhouette_measures = np.genfromtxt(\"Results/silhouette_measures.txt\", delimiter=',')\n k_range = np.r_[2:21]\n k_indices = np.r_[0:19]\n # plot Davies Bouldin measures\n plt.plot(k_range, DB_measures[:, 0], 'cx-', label=\"TSC - Kmeans\", marker = 'o', markevery = [i for i in k_indices[1:]])\n plt.plot(k_range, DB_measures[:, 0], 'cx-', marker = '*', markevery = [k_indices[0]], markersize=14)\n\n plt.plot(k_range, DB_measures[:, 1], 'rx-', label=\"TSC - GMM\", marker = 'o', markevery = [i for i in k_indices[1:]])\n plt.plot(k_range, DB_measures[:, 1], 'rx-', marker='*', markevery=[k_indices[0]], markersize=14)\n\n plt.plot(k_range, DB_measures[:, 2], 'gx-', label=\"Kmeans\", marker = 'o', markevery = [i for i in k_indices[1:]])\n plt.plot(k_range, DB_measures[:, 2], 'gx-', marker='*', markevery=[k_indices[0]], markersize=14)\n\n plt.plot(k_range, DB_measures[:, 3], 'mx-', label=\"GMM\", marker = 'o', markevery = [i for i in k_indices[1:]])\n plt.plot(k_range, DB_measures[:, 3], 'mx-', marker='*', markevery=[k_indices[0]], markersize=14)\n\n plt.xlabel('Number of clusters', fontsize= 14)\n plt.ylabel('DB Index', fontsize= 14)\n plt.xticks(k_range)\n #plt.title('Davies-Bouldin score for the clustering methods version ' + str(version))\n #plt.legend(fontsize=12, fancybox=True, 
edgecolor = \"black\", frameon=True)\n plt.savefig(\"Figures/Plots/DB_validation_plot.png\")\n plt.show()\n\n # plot Silhouette measures\n plt.plot(k_range, silhouette_measures[:, 0], 'cx-', label=\"TS k-means\", marker = 'o', markevery=[i for i in k_indices[1:]])\n plt.plot(k_range, silhouette_measures[:, 0], 'cx-', marker = '*', markevery = [k_indices[0]], markersize=14)\n\n plt.plot(k_range, silhouette_measures[:, 1], 'rx-', label=\"TS GMM\", marker = 'o', markevery=[i for i in k_indices[1:]])\n plt.plot(k_range, silhouette_measures[:, 1], 'rx-', marker = '*', markevery = [k_indices[0]], markersize=14)\n\n plt.plot(k_range, silhouette_measures[:, 2], 'gx-', label=\"k-means\", marker = 'o', markevery=[i for i in k_indices[1:]])\n plt.plot(k_range, silhouette_measures[:, 2], 'gx-', marker='*', markevery = [k_indices[0]], markersize=14)\n\n plt.plot(k_range, silhouette_measures[:, 3], 'mx-', label=\"GMM\", marker = 'o', markevery=[i for i in k_indices[1:]])\n plt.plot(k_range, silhouette_measures[:, 3], 'mx-', marker='*', markevery = [k_indices[0]], markersize=14)\n\n plt.xlabel('Number of clusters', fontsize= 14)\n plt.ylabel('Average Silhouette Coefficient', fontsize=14)\n plt.xticks(k_range)\n #plt.title('Silhouette score for the clustering methods version ' + str(version))\n plt.legend(fontsize=12, fancybox=True, edgecolor = \"black\", frameon =True)\n plt.savefig(\"Figures/Plots/Silhouette_validation_plot.png\")\n plt.show()\n\n\nif __name__ == '__main__':\n version = 10\n main_validation(version)\n main_visul()\n", "repo_name": "StefanLam99/UnileverCase", "sub_path": "main_cluster_validation.py", "file_name": "main_cluster_validation.py", "file_ext": "py", "file_size_in_byte": 5572, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "numpy.r_", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 28, "usage_type": "call"}, {"api_name": "SOM.SOM", "line_number": 31, "usage_type": "call"}, {"api_name": "two_stage_clustering.TwoStageClustering", "line_number": 36, "usage_type": "call"}, {"api_name": "two_stage_clustering.TwoStageClustering", "line_number": 37, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 38, "usage_type": "call"}, {"api_name": "sklearn.mixture.GaussianMixture", "line_number": 39, "usage_type": "call"}, {"api_name": "time.time", "line_number": 46, "usage_type": "call"}, {"api_name": "time.time", "line_number": 48, "usage_type": "call"}, {"api_name": "time.time", "line_number": 51, "usage_type": "call"}, {"api_name": "time.time", "line_number": 53, "usage_type": "call"}, {"api_name": "sklearn.metrics.davies_bouldin_score", "line_number": 56, "usage_type": "call"}, {"api_name": "sklearn.metrics.silhouette_score", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.genfromtxt", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.genfromtxt", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 70, "usage_type": "attribute"}, {"api_name": "numpy.r_", "line_number": 71, "usage_type": "attribute"}]} +{"seq_id": "72196372574", "text": "import json\nfrom django.shortcuts import render\nfrom django.http import JsonResponse\nfrom django.shortcuts import get_object_or_404\nfrom 
django.db.models import Avg, Max, Min\nfrom mainapp.views import addPurchasesViewed, productRecommendation\nfrom animators.models import Agency, Animator, ImageLibrary, Reviews, Questions, Audits, AuditElement\n\n# Create your views here.\ndef animatorsView(request):\n agencies = Agency.objects.all()[:7]\n agencies_count = Agency.objects.all().count()\n \n for agency in agencies:\n agency.additional_data = Animator.objects.filter(agency=agency)[:4]\n\n return render(request, 'animators.html', {\"agencies\": agencies, \"agencies_count\": agencies_count})\n\ndef animatorView(request, animator_slug):\n subcategories = []\n agency = get_object_or_404(Agency, slug=animator_slug)\n animators = Animator.objects.filter(agency=agency)\n audit = Audits.objects.filter(agency_id=agency.id)[:2]\n reviews = Reviews.objects.filter(agency_id=agency.id)[:2]\n questions = Questions.objects.filter(agency_id=agency.id)[:2]\n recommendations = productRecommendation()\n \n if not request.session.get('viewed_products'):\n request.session['viewed_products'] = list()\n listPurchasesViewed = False\n else:\n listPurchasesViewed = request.session['viewed_products']\n \n for animator in animators:\n if not animator.subcategory in subcategories:\n subcategories.append(animator.subcategory)\n\n if not audit:\n audit_elements = False\n else:\n audit_elements = AuditElement.objects.filter(audit_id=audit[0].id)[:2]\n\n if not questions:\n questions = False\n\n if not reviews:\n reviews = False\n \n return render(request, 'animator.html', {\n \"agency\": agency,\n \"animators\": animators,\n \"name\": \"animator\",\n \"subcategories\": subcategories,\n 'audit_elements': audit_elements,\n 'reviews': reviews,\n 'questions': questions,\n 'recommendations': recommendations,\n 'listPurchasesViewed': listPurchasesViewed\n })", "repo_name": "Alvearium/kiddeo", "sub_path": "animators/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2099, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "animators.models.Agency.objects.all", "line_number": 11, "usage_type": "call"}, {"api_name": "animators.models.Agency.objects", "line_number": 11, "usage_type": "attribute"}, {"api_name": "animators.models.Agency", "line_number": 11, "usage_type": "name"}, {"api_name": "animators.models.Agency.objects.all", "line_number": 12, "usage_type": "call"}, {"api_name": "animators.models.Agency.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "animators.models.Agency", "line_number": 12, "usage_type": "name"}, {"api_name": "animators.models.Animator.objects.filter", "line_number": 15, "usage_type": "call"}, {"api_name": "animators.models.Animator.objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "animators.models.Animator", "line_number": 15, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 17, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 21, "usage_type": "call"}, {"api_name": "animators.models.Agency", "line_number": 21, "usage_type": "argument"}, {"api_name": "animators.models", "line_number": 22, "usage_type": "name"}, {"api_name": "animators.models.Animator.objects.filter", "line_number": 22, "usage_type": "call"}, {"api_name": "animators.models.Animator.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "animators.models.Animator", "line_number": 22, "usage_type": "name"}, {"api_name": 
"animators.models.Audits.objects.filter", "line_number": 23, "usage_type": "call"}, {"api_name": "animators.models.Audits.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "animators.models.Audits", "line_number": 23, "usage_type": "name"}, {"api_name": "animators.models.Reviews.objects.filter", "line_number": 24, "usage_type": "call"}, {"api_name": "animators.models.Reviews.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "animators.models.Reviews", "line_number": 24, "usage_type": "name"}, {"api_name": "animators.models.Questions.objects.filter", "line_number": 25, "usage_type": "call"}, {"api_name": "animators.models.Questions.objects", "line_number": 25, "usage_type": "attribute"}, {"api_name": "animators.models.Questions", "line_number": 25, "usage_type": "name"}, {"api_name": "mainapp.views.productRecommendation", "line_number": 26, "usage_type": "call"}, {"api_name": "animators.models", "line_number": 34, "usage_type": "name"}, {"api_name": "animators.models.AuditElement.objects.filter", "line_number": 41, "usage_type": "call"}, {"api_name": "animators.models.AuditElement.objects", "line_number": 41, "usage_type": "attribute"}, {"api_name": "animators.models.AuditElement", "line_number": 41, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 49, "usage_type": "call"}, {"api_name": "animators.models", "line_number": 51, "usage_type": "name"}]} +{"seq_id": "13219946602", "text": "import asyncio\nimport logging\n\nimport psycopg_pool # pip install \"psycopg[binary]\" , pip install psycopg_pool , pip install psycopg\nfrom aiogram import Bot, Dispatcher\nfrom aiogram import F\nfrom aiogram.enums import ContentType\nfrom aiogram.filters import Command, CommandStart\nfrom aiogram.fsm.storage.redis import RedisStorage # pip install redis\nfrom apscheduler.jobstores.redis import RedisJobStore\nfrom apscheduler.schedulers.asyncio import AsyncIOScheduler # pip install apscheduler\nfrom apscheduler_di import ContextSchedulerDecorator # pip install apscheduler-di\n\nfrom core.settings import settings\nfrom core.filters.iscontact import IsTrueContact\nfrom core.handlers import form\nfrom core.handlers import send_media\nfrom core.handlers.basic import get_start, get_photo, get_hello, get_location, get_inline\nfrom core.handlers.callback import select_macbook\nfrom core.handlers.contact import get_true_contact, get_fake_contact\nfrom core.handlers.pay import order, pre_checkout_query, successful_payment, shiping_check\nfrom core.middlewares.apschedulermiddleware import SchedulerMiddleware\nfrom core.middlewares.countermiddleware import CounterMiddleware\nfrom core.middlewares.dbmiddleware import DbSession\nfrom core.middlewares.example_chat_action_middleware import ExampleChatActionMiddleware\nfrom core.utils.callbackdata import MacInfo\nfrom core.utils.commands import set_commands\nfrom core.utils.statesform import StatesForm\n\nasyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) # for psycopg\n\n\nasync def start_bot(bot: Bot):\n await set_commands(bot)\n await bot.send_message(settings.bots.admin_id, text=\"Bot Started\")\n\n\nasync def stop_bot(bot: Bot):\n await bot.send_message(settings.bots.admin_id, text=\"Bot Stoped\")\n\n\ndef create_pool():\n # return await asyncpg.create_pool(user=\"postgres\", password=\"postgres\", database=\"users\",\n # host=\"127.0.0.1\", port=5432, command_timeout=60) # asyncpg version needs to\n # be async\n\n return psycopg_pool.AsyncConnectionPool(f\"host=127.0.0.1 
port=5432 dbname=users user=postgres password=postgres \"\n f\"connect_timeout=60\") # psycopg version\n\n\nasync def start():\n logging.basicConfig(level=logging.INFO,\n format=\"%(asctime)s:[%(levelname)s]:%(name)s:\"\n \"(%(filename)s).%(funcName)s(%(lineno)d):%(message)s\")\n bot = Bot(token=settings.bots.bot_token, parse_mode='HTML')\n\n pool_connect = create_pool() # if asyncpg use async\n\n # Redis Storage\n\n storage = RedisStorage.from_url('redis://localhost:6379/0')\n\n # Initialize the Dispatcher\n dp = Dispatcher(storage=storage)\n\n jobstores = {\n 'default': RedisJobStore(jobs_key='dispatched_trips_jobs',\n run_times_key='dispatched_trips_running',\n host='localhost',\n db=2,\n port=6379)\n }\n\n # Register Schedulers\n scheduler = ContextSchedulerDecorator(AsyncIOScheduler(timezone=\"Europe/Riga\", jobstores=jobstores))\n scheduler.ctx.add_instance(bot, declared_class=Bot)\n # scheduler.add_job(apscheduler.send_message_time, trigger='date', run_date=datetime.now() + timedelta(seconds=10))\n # scheduler.add_job(apscheduler.send_message_cron, trigger='cron', hour=datetime.now().hour,\n # minute=datetime.now().minute + 1, start_date=datetime.now())\n # scheduler.add_job(apscheduler.send_message_interval, trigger='interval', seconds=60)\n # scheduler.start()\n\n # Register middlewares\n dp.update.middleware.register(DbSession(pool_connect))\n dp.update.middleware.register(SchedulerMiddleware(scheduler))\n dp.message.middleware.register(CounterMiddleware())\n dp.message.middleware.register(ExampleChatActionMiddleware())\n # dp.update.middleware.register(OfficeHoursMiddleware())\n\n # Register startup and shutdown procedures\n dp.startup.register(start_bot)\n dp.shutdown.register(stop_bot)\n\n # Register commands\n dp.message.register(get_start, CommandStart())\n dp.message.register(get_inline, Command(\"inline\"))\n dp.message.register(order, Command(\"pay\"))\n dp.message.register(form.get_form, Command(commands=\"form\"))\n dp.message.register(send_media.get_audio, Command(commands=\"audio\"), flags={'chat_action': 'upload_document'})\n dp.message.register(send_media.get_document, Command(commands=\"document\"), flags={'chat_action': 'upload_document'})\n dp.message.register(send_media.get_meda_group, Command(commands=\"mediagroup\"),\n flags={'chat_action': 'upload_photo'})\n dp.message.register(send_media.get_photo, Command(commands=\"photo\"), flags={'chat_action': 'upload_photo'})\n dp.message.register(send_media.get_sticker, Command(commands=\"sticker\"), flags={'chat_action': 'choose_sticker'})\n dp.message.register(send_media.get_video, Command(commands=\"video\"), flags={'chat_action': 'upload_video'})\n dp.message.register(send_media.get_video_note, Command(commands=\"video_note\"),\n flags={'chat_action': 'upload_video_note'})\n dp.message.register(send_media.get_voice, Command(commands=\"voice\"), flags={'chat_action': 'upload_voice'})\n\n # Register States\n dp.message.register(form.get_last_name, StatesForm.GET_LAST_NAME)\n dp.message.register(form.get_name, StatesForm.GET_NAME)\n dp.message.register(form.get_age, StatesForm.GET_AGE)\n\n # Register Messages\n dp.message.register(get_photo, F.photo)\n dp.message.register(get_hello, F.text == \"hi\")\n dp.message.register(get_true_contact, F.content_type == ContentType.CONTACT, IsTrueContact())\n dp.message.register(get_fake_contact, F.content_type == ContentType.CONTACT)\n dp.message.register(get_location, F.location)\n\n # Callback registers\n dp.callback_query.register(select_macbook, MacInfo.filter(F.model == 
\"pro\"))\n\n # Additional registers\n dp.message.register(successful_payment, F.content_type == ContentType.SUCCESSFUL_PAYMENT)\n dp.pre_checkout_query.register(pre_checkout_query)\n dp.shipping_query.register(shiping_check)\n\n try:\n await dp.start_polling(bot)\n finally:\n await bot.session.close()\n\n\nif __name__ == \"__main__\":\n asyncio.run(start())\n", "repo_name": "FR13NDS2020/aiogram-template-functionality", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 6372, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "33", "api": [{"api_name": "asyncio.set_event_loop_policy", "line_number": 30, "usage_type": "call"}, {"api_name": "asyncio.WindowsSelectorEventLoopPolicy", "line_number": 30, "usage_type": "call"}, {"api_name": "aiogram.Bot", "line_number": 33, "usage_type": "name"}, {"api_name": "core.utils.commands.set_commands", "line_number": 34, "usage_type": "call"}, {"api_name": "core.settings.settings.bots", "line_number": 35, "usage_type": "attribute"}, {"api_name": "core.settings.settings", "line_number": 35, "usage_type": "name"}, {"api_name": "aiogram.Bot", "line_number": 38, "usage_type": "name"}, {"api_name": "core.settings.settings.bots", "line_number": 39, "usage_type": "attribute"}, {"api_name": "core.settings.settings", "line_number": 39, "usage_type": "name"}, {"api_name": "psycopg_pool.AsyncConnectionPool", "line_number": 47, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 52, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 52, "usage_type": "attribute"}, {"api_name": "aiogram.Bot", "line_number": 55, "usage_type": "call"}, {"api_name": "core.settings.settings.bots", "line_number": 55, "usage_type": "attribute"}, {"api_name": "core.settings.settings", "line_number": 55, "usage_type": "name"}, {"api_name": "aiogram.fsm.storage.redis.RedisStorage.from_url", "line_number": 61, "usage_type": "call"}, {"api_name": "aiogram.fsm.storage.redis.RedisStorage", "line_number": 61, "usage_type": "name"}, {"api_name": "aiogram.Dispatcher", "line_number": 64, "usage_type": "call"}, {"api_name": "apscheduler.jobstores.redis.RedisJobStore", "line_number": 67, "usage_type": "call"}, {"api_name": "apscheduler_di.ContextSchedulerDecorator", "line_number": 75, "usage_type": "call"}, {"api_name": "apscheduler.schedulers.asyncio.AsyncIOScheduler", "line_number": 75, "usage_type": "call"}, {"api_name": "aiogram.Bot", "line_number": 76, "usage_type": "name"}, {"api_name": "core.middlewares.dbmiddleware.DbSession", "line_number": 84, "usage_type": "call"}, {"api_name": "core.middlewares.apschedulermiddleware.SchedulerMiddleware", "line_number": 85, "usage_type": "call"}, {"api_name": "core.middlewares.countermiddleware.CounterMiddleware", "line_number": 86, "usage_type": "call"}, {"api_name": "core.middlewares.example_chat_action_middleware.ExampleChatActionMiddleware", "line_number": 87, "usage_type": "call"}, {"api_name": "core.handlers.basic.get_start", "line_number": 95, "usage_type": "argument"}, {"api_name": "aiogram.filters.CommandStart", "line_number": 95, "usage_type": "call"}, {"api_name": "core.handlers.basic.get_inline", "line_number": 96, "usage_type": "argument"}, {"api_name": "aiogram.filters.Command", "line_number": 96, "usage_type": "call"}, {"api_name": "core.handlers.pay.order", "line_number": 97, "usage_type": "argument"}, {"api_name": "aiogram.filters.Command", "line_number": 97, "usage_type": "call"}, {"api_name": 
"core.handlers.form.get_form", "line_number": 98, "usage_type": "attribute"}, {"api_name": "core.handlers.form", "line_number": 98, "usage_type": "name"}, {"api_name": "aiogram.filters.Command", "line_number": 98, "usage_type": "call"}, {"api_name": "core.handlers.send_media.get_audio", "line_number": 99, "usage_type": "attribute"}, {"api_name": "core.handlers.send_media", "line_number": 99, "usage_type": "name"}, {"api_name": "aiogram.filters.Command", "line_number": 99, "usage_type": "call"}, {"api_name": "core.handlers.send_media.get_document", "line_number": 100, "usage_type": "attribute"}, {"api_name": "core.handlers.send_media", "line_number": 100, "usage_type": "name"}, {"api_name": "aiogram.filters.Command", "line_number": 100, "usage_type": "call"}, {"api_name": "core.handlers.send_media.get_meda_group", "line_number": 101, "usage_type": "attribute"}, {"api_name": "core.handlers.send_media", "line_number": 101, "usage_type": "name"}, {"api_name": "aiogram.filters.Command", "line_number": 101, "usage_type": "call"}, {"api_name": "core.handlers.send_media.get_photo", "line_number": 103, "usage_type": "attribute"}, {"api_name": "core.handlers.send_media", "line_number": 103, "usage_type": "name"}, {"api_name": "aiogram.filters.Command", "line_number": 103, "usage_type": "call"}, {"api_name": "core.handlers.send_media.get_sticker", "line_number": 104, "usage_type": "attribute"}, {"api_name": "core.handlers.send_media", "line_number": 104, "usage_type": "name"}, {"api_name": "aiogram.filters.Command", "line_number": 104, "usage_type": "call"}, {"api_name": "core.handlers.send_media.get_video", "line_number": 105, "usage_type": "attribute"}, {"api_name": "core.handlers.send_media", "line_number": 105, "usage_type": "name"}, {"api_name": "aiogram.filters.Command", "line_number": 105, "usage_type": "call"}, {"api_name": "core.handlers.send_media.get_video_note", "line_number": 106, "usage_type": "attribute"}, {"api_name": "core.handlers.send_media", "line_number": 106, "usage_type": "name"}, {"api_name": "aiogram.filters.Command", "line_number": 106, "usage_type": "call"}, {"api_name": "core.handlers.send_media.get_voice", "line_number": 108, "usage_type": "attribute"}, {"api_name": "core.handlers.send_media", "line_number": 108, "usage_type": "name"}, {"api_name": "aiogram.filters.Command", "line_number": 108, "usage_type": "call"}, {"api_name": "core.handlers.form.get_last_name", "line_number": 111, "usage_type": "attribute"}, {"api_name": "core.handlers.form", "line_number": 111, "usage_type": "name"}, {"api_name": "core.utils.statesform.StatesForm.GET_LAST_NAME", "line_number": 111, "usage_type": "attribute"}, {"api_name": "core.utils.statesform.StatesForm", "line_number": 111, "usage_type": "name"}, {"api_name": "core.handlers.form.get_name", "line_number": 112, "usage_type": "attribute"}, {"api_name": "core.handlers.form", "line_number": 112, "usage_type": "name"}, {"api_name": "core.utils.statesform.StatesForm.GET_NAME", "line_number": 112, "usage_type": "attribute"}, {"api_name": "core.utils.statesform.StatesForm", "line_number": 112, "usage_type": "name"}, {"api_name": "core.handlers.form.get_age", "line_number": 113, "usage_type": "attribute"}, {"api_name": "core.handlers.form", "line_number": 113, "usage_type": "name"}, {"api_name": "core.utils.statesform.StatesForm.GET_AGE", "line_number": 113, "usage_type": "attribute"}, {"api_name": "core.utils.statesform.StatesForm", "line_number": 113, "usage_type": "name"}, {"api_name": "core.handlers.basic.get_photo", "line_number": 116, 
"usage_type": "argument"}, {"api_name": "aiogram.F.photo", "line_number": 116, "usage_type": "attribute"}, {"api_name": "aiogram.F", "line_number": 116, "usage_type": "name"}, {"api_name": "core.handlers.basic.get_hello", "line_number": 117, "usage_type": "argument"}, {"api_name": "aiogram.F.text", "line_number": 117, "usage_type": "attribute"}, {"api_name": "aiogram.F", "line_number": 117, "usage_type": "name"}, {"api_name": "core.handlers.contact.get_true_contact", "line_number": 118, "usage_type": "argument"}, {"api_name": "aiogram.F.content_type", "line_number": 118, "usage_type": "attribute"}, {"api_name": "aiogram.F", "line_number": 118, "usage_type": "name"}, {"api_name": "aiogram.enums.ContentType.CONTACT", "line_number": 118, "usage_type": "attribute"}, {"api_name": "aiogram.enums.ContentType", "line_number": 118, "usage_type": "name"}, {"api_name": "core.filters.iscontact.IsTrueContact", "line_number": 118, "usage_type": "call"}, {"api_name": "core.handlers.contact.get_fake_contact", "line_number": 119, "usage_type": "argument"}, {"api_name": "aiogram.F.content_type", "line_number": 119, "usage_type": "attribute"}, {"api_name": "aiogram.F", "line_number": 119, "usage_type": "name"}, {"api_name": "aiogram.enums.ContentType.CONTACT", "line_number": 119, "usage_type": "attribute"}, {"api_name": "aiogram.enums.ContentType", "line_number": 119, "usage_type": "name"}, {"api_name": "core.handlers.basic.get_location", "line_number": 120, "usage_type": "argument"}, {"api_name": "aiogram.F.location", "line_number": 120, "usage_type": "attribute"}, {"api_name": "aiogram.F", "line_number": 120, "usage_type": "name"}, {"api_name": "core.handlers.callback.select_macbook", "line_number": 123, "usage_type": "argument"}, {"api_name": "core.utils.callbackdata.MacInfo.filter", "line_number": 123, "usage_type": "call"}, {"api_name": "core.utils.callbackdata.MacInfo", "line_number": 123, "usage_type": "name"}, {"api_name": "aiogram.F.model", "line_number": 123, "usage_type": "attribute"}, {"api_name": "aiogram.F", "line_number": 123, "usage_type": "name"}, {"api_name": "core.handlers.pay.successful_payment", "line_number": 126, "usage_type": "argument"}, {"api_name": "aiogram.F.content_type", "line_number": 126, "usage_type": "attribute"}, {"api_name": "aiogram.F", "line_number": 126, "usage_type": "name"}, {"api_name": "aiogram.enums.ContentType.SUCCESSFUL_PAYMENT", "line_number": 126, "usage_type": "attribute"}, {"api_name": "aiogram.enums.ContentType", "line_number": 126, "usage_type": "name"}, {"api_name": "core.handlers.pay.pre_checkout_query", "line_number": 127, "usage_type": "argument"}, {"api_name": "core.handlers.pay.shiping_check", "line_number": 128, "usage_type": "argument"}, {"api_name": "asyncio.run", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "17132925513", "text": "from kfp.components import InputPath, OutputPath, create_component_from_func\n\ndef convert_CatBoostModel_to_ONNX(\n model_path: InputPath('CatBoostModel'),\n converted_model_path: OutputPath('ONNX'),\n):\n '''Convert CatBoost model to ONNX format.\n\n Args:\n model_path: Path of a trained model in binary CatBoost model format.\n converted_model_path: Output path for the converted model.\n\n Outputs:\n converted_model: Model in ONNX format.\n\n Annotations:\n author: Alexey Volkov \n '''\n from catboost import CatBoost\n\n model = CatBoost()\n model.load_model(model_path)\n model.save_model(converted_model_path, format=\"onnx\")\n\n\nif __name__ == '__main__':\n create_component_from_func(\n 
convert_CatBoostModel_to_ONNX,\n output_component_file='component.yaml',\n base_image='python:3.7',\n packages_to_install=['catboost==0.22'],\n annotations={\n \"author\": \"Alexey Volkov \",\n \"canonical_location\": \"https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/CatBoost/convert_CatBoostModel_to_ONNX/component.yaml\",\n },\n )\n", "repo_name": "kubeflow/pipelines", "sub_path": "components/contrib/CatBoost/convert_CatBoostModel_to_ONNX/component.py", "file_name": "component.py", "file_ext": "py", "file_size_in_byte": 1207, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3324, "dataset": "github-code", "pt": "33", "api": [{"api_name": "kfp.components.InputPath", "line_number": 4, "usage_type": "call"}, {"api_name": "kfp.components.OutputPath", "line_number": 5, "usage_type": "call"}, {"api_name": "catboost.CatBoost", "line_number": 21, "usage_type": "call"}, {"api_name": "kfp.components.create_component_from_func", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "16785297044", "text": "# coding:utf-8\n\nimport urllib\nimport urllib2\nfrom lxml import etree\nfrom bs4 import BeautifulSoup as bsoup\n\ndef load_page(url):\n \"\"\"\n 向网站发送请求\n :return:\n \"\"\"\n ag_header = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) \\\n AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36\"}\n\n request = urllib2.Request(url, headers=ag_header)\n html = urllib2.urlopen(request).read()\n test = bsoup(html)\n print(test.prettify())\n\n xml_data = etree.HTML(html)\n #link_list = xml_data.xpath('//div[@class=\"wrappic\"]//img/@src')\n link_list = xml_data.xpath('//img[@class=\"BDE_Image\"]/@src')\n print(\"link_list: \" + str(link_list))\n # for item in link_list:\n # save_image(item)\n\n\ndef save_image(link):\n \"\"\"\n 保存接收到的数据为本地图片\n :return:\n \"\"\"\n ag_header = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) \\\n AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36\"}\n file_name = link[-16:]\n print(\"开始下载图片%s\" % file_name)\n request = urllib2.Request(link, headers=ag_header)\n data = urllib2.urlopen(request).read()\n with open(file_name, \"wb\") as f:\n f.write(data)\n print(\"保存图片%s完成!\" % file_name)\n\n\ndef beauty_spider():\n \"\"\"\n 进行总体调度\n :return:\n \"\"\"\n url = \"https://tieba.baidu.com/p/5498979379/\"\n load_page(url)\n\n\nif __name__ == '__main__':\n beauty_spider()\n", "repo_name": "AirsPacino/heima", "sub_path": "07-爬虫/02-urllib2高级-正则-xml/09-xpath爬取美女图片.py", "file_name": "09-xpath爬取美女图片.py", "file_ext": "py", "file_size_in_byte": 1495, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "42", "api": [{"api_name": "urllib2.Request", "line_number": 16, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 17, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 18, "usage_type": "call"}, {"api_name": "lxml.etree.HTML", "line_number": 21, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 21, "usage_type": "name"}, {"api_name": "urllib2.Request", "line_number": 38, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "4129884979", "text": "import matplotlib.pyplot as plt\nimport os\nimport numpy as np\nimport scipy\n\ndata = {'standard':{}, 'nst':{}, 'mix':{}, 'nstmix':{}}\nfold_track = {'standard':{}, 'nst':{}, 'mix':{}, 'nstmix':{}}\nresults_dir = '/nfs/masi/hansencb/nlst_nst_mixmatch/results'\nnums = 
[4, 20, 40, 200, 400, 800, 1200, 2000, 2800, 3600]\n\nresults_dirs = os.listdir(results_dir)\nresults_dirs.sort()\n\nfor result_dir in results_dirs:\n parts = result_dir.split('_')\n # key = '_'.join(parts[1:])\n\n fold = int(parts[0][-1])\n num = int(parts[1].split('d')[-1])\n nst = float(parts[2].split('a')[-1])\n lamb = float(parts[3].split('a')[-1])\n\n for n in nums:\n if abs(num-n) < 5:\n num = n\n\n key = '{}_{}_{}'.format(num, nst, lamb)\n\n npz_file = os.path.join(results_dir, result_dir, 'metrics.npz')\n\n if os.path.isfile(npz_file):\n npz = np.load(npz_file)\n auc = npz['arr_0']\n fpr = npz['arr_1']\n tpr = npz['arr_2']\n scores = npz['arr_3']\n\n if nst == 0 and lamb == 0:\n if num not in data['standard']:\n data['standard'][num] = np.zeros(5) + np.NaN\n data['standard'][num][fold] = auc\n elif nst != 0 and lamb != 0:\n key = '{}_{}'.format(nst, lamb)\n if num not in data['nstmix']:\n data['nstmix'][num] = {}\n if key not in data['nstmix'][num]:\n data['nstmix'][num][key] = np.zeros(5) + np.NaN\n data['nstmix'][num][key][fold] = auc\n elif nst != 0:\n if num not in data['nst']:\n data['nst'][num] = {}\n if nst not in data['nst'][num]:\n data['nst'][num][nst] = np.zeros(5) + np.NaN\n data['nst'][num][nst][fold] = auc\n else:\n if num not in data['mix']:\n data['mix'][num] = {}\n if lamb not in data['mix'][num]:\n data['mix'][num][lamb] = np.zeros(5) + np.NaN\n data['mix'][num][lamb][fold] = auc\n\nnums = nums[1:-3]\n\nimg = []\nfor num in nums:\n d = data['nst'][num]\n nsts = list(d.keys())\n nsts.sort()\n\n row = []\n for nst in nsts:\n row.append(np.nanmean(d[nst]))\n img.append(row)\n\n data['nst'][num] = data['nst'][num][nsts[np.argmax(row)]]\n\n\nimg = []\nfor num in nums:\n d = data['mix'][num]\n lambs = list(d.keys())\n lambs.sort()\n\n row = []\n for lamb in lambs:\n row.append(np.nanmean(d[lamb]))\n img.append(row)\n data['mix'][num] = data['mix'][num][lambs[np.argmax(row)]]\n\n\nimg = []\nfor num in nums:\n d = data['nstmix'][num]\n hypers = list(d.keys())\n\n row = []\n for hyper in hypers:\n if np.sum(np.isnan(d[hyper]))<2:\n row.append(np.nanmean(d[hyper]))\n else:\n row.append(0)\n img.append(row)\n data['nstmix'][num] = data['nstmix'][num][hypers[np.argmax(row)]]\n\n\n\n\nkeys = ['standard', 'mix', 'nst', 'nstmix']\nlegend = ['Baseline', 'Supervised', 'MixMatch', 'Nullspace Tuning', 'MixMatchNST']\n\n\nauc_out_file = 'nlst_aucs.csv'\n\norig_auc = 0.7418\nx = np.arange(0, len(nums))\nplt.figure()\nplt.plot(x, np.zeros(len(nums)) + orig_auc)\n# plt.fill_between(x, np.zeros(len(nums)) + orig_auc + 0.0211, np.zeros(len(nums)) + orig_auc -.0211, alpha=0.25)\nplt.fill_between(x, np.zeros(len(nums)) + orig_auc + 0, np.zeros(len(nums)) + orig_auc -0, alpha=0.25)\n\nwith open(auc_out_file, 'w') as f:\n for i,k in enumerate(keys):\n y = []\n err = []\n f.write('{},fold0,fold1,fold2,fold3,fold4\\n'.format(legend[i+1]))\n for n in nums:\n f.write('numsubjects_{},{}\\n'.format(n,','.join([str(x) for x in data[k][n]])))\n y.append(np.mean(data[k][n]))\n err.append(np.std(data[k][n])/np.sqrt(len(data[k][n])))\n\n\n\n\n", "repo_name": "hanscol/NST_Mixmatch_NLST", "sub_path": "pytorch_src/auc_analysis.py", "file_name": "auc_analysis.py", "file_ext": "py", "file_size_in_byte": 3742, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "33", "api": [{"api_name": "os.listdir", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, 
"usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.NaN", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.NaN", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.NaN", "line_number": 53, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.NaN", "line_number": 59, "usage_type": "attribute"}, {"api_name": "numpy.nanmean", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.fill_between", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 129, "usage_type": "call"}]} +{"seq_id": "27133498163", "text": "import requests\nimport re\n\nf = open('data/weather.csv', 'w', encoding='utf-8')\nurl = 'http://www.kma.go.kr/weather/forecast/mid-term-rss3.jsp?stnId=131'\nreceived = requests.get(url)\n# print(received)\n# print(received.text)\n\n# 문제\n# city만 찾아보세요\n# city = re.findall(r'([가-힣]+)', received.text)\n# print(city)\n\n# 문제\n# location을 찾아보세요\n# re.DOTALL : 개행문자 무시.\n# .+ : 탐용적 (greedy)\n# .+? 
: 비탐욕적(non-greedy)\n# re.DOTALL 내가 찾는것이 여러줄에 걸쳐 있을 때\nlocations = re.findall(r'.+?', received.text, re.DOTALL)\nprint(len(locations))\n# print(locations)\n# for loc in locations:\n # print(loc)\n\n# print('='*50)\n\n# 문제\n# province와 city를 찾아보세요\nfor loc in locations:\n prov = re.findall(r'(.+)', loc)\n city = re.findall(r'(.+)', loc)\n # print(prov[0], city[0])\n\n # 문제\n # province와 city를 한번에 찾아보세요 (깔끔하게 출력 포함)\n prov_city = re.findall(r'(.+).+(.+)', loc, re.DOTALL)\n prov, city = prov_city[0]\n # print(prov_city[0])\n #print(prov, city)\n\n\n # 문제\n # data를 찾아보세요\n data = re.findall(r'.+?', loc, re.DOTALL)\n # print(len(data))\n\n for datum in data:\n # mode = re.findall(r'(.+)', datum)\n # tmEF = re.findall(r'(.+)', datum)\n # wf = re.findall(r'(.+)', datum)\n # tmn = re.findall(r'(.+)', datum)\n # tmax = re.findall(r'(.+)', datum)\n # rnSt = re.findall(r'(.+)', datum)\n # print(prov[0], city[0], mode[0], tmEF[0], wf[0], tmn[0], tmax[0], rnSt[0])\n pattern = r'<.+>(.+)'\n\n all_info = re.findall(pattern, datum)\n mode, tmEF, wf, tmn, tmax, rnSt = all_info # unpacking\n # print(prov, city, mode, tmEF, wf, tmn, tmax, rnSt, file=f, sep=',')\n # print(prov, city, all_info)\n # print(prov, city, all_info)\n print(prov, city, *all_info)\n\n # row = '{},{},{},{},{},{},{},{}\\n'.format(prov, city, mode, tmEF, wf, tmn, tmax, rnSt)\n # f.write(row)\n\n f.write(prov + ',')\n f.write(city + ',')\n f.write(mode + ',')\n f.write(tmEF + ',')\n f.write(wf + ',')\n f.write(tmn + ',')\n f.write(tmax + ',')\n f.write(rnSt + '\\n')\n\n\n# 문제\n# 기상청 데이터 파일을 저장하세요 (원본대로 읽을 수 있는 형태로 저장)\n# weather.csv\n\nf.close()\n\n\n", "repo_name": "JeongHwan-dev/AI-Programming-Course", "sub_path": "code/Day_08_02_weather.py", "file_name": "Day_08_02_weather.py", "file_ext": "py", "file_size_in_byte": 2578, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "requests.get", "line_number": 6, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 21, "usage_type": "call"}, {"api_name": "re.DOTALL", "line_number": 21, "usage_type": "attribute"}, {"api_name": "re.findall", "line_number": 32, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 33, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 38, "usage_type": "call"}, {"api_name": "re.DOTALL", "line_number": 38, "usage_type": "attribute"}, {"api_name": "re.findall", "line_number": 46, "usage_type": "call"}, {"api_name": "re.DOTALL", "line_number": 46, "usage_type": "attribute"}, {"api_name": "re.findall", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "5964314518", "text": "import logging\nimport time\nimport struct\nimport socket\nimport pyaudio\nfrom unittest import signals\nfrom pydub import AudioSegment\nfrom Queue import Queue\nfrom ...config import server as server_config, audio as audio_config\nfrom ...encrypt import utils\nfrom ...queue import signals, utils as q_utils\nfrom ...encrypt import WELL1024, string__expanded_key, utils\n\ndef udp_send_audio(queue, signal_queue, queue__frames_to_save=None):\n\tp = pyaudio.PyAudio()\n\taudio_stream = p.open(format=audio_config.FORMAT, channels=audio_config.CHANNELS, rate=audio_config.RATE,\n\t\t\t\t\t\t input=True, frames_per_buffer=audio_config.CHUNK)\n\tudp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n\twhile signal_queue.empty():\n\t\taudio_data_clear = audio_stream.read(audio_config.CHUNK)\n\t\taudio_data_encrypted = utils.encrypt(audio_data_clear, utils.string__expanded_key)\n\t\tif 
queue__frames_to_save is not None:\n\t\t\tqueue__frames_to_save.put(audio_data_clear)\n\t\tudp.sendto(audio_data_encrypted,\n\t\t\t\t (server_config.SERVER_AUDIO_RECEIPT__ADDRESS, server_config.SERVER_AUDIO_RECEIPT__PORT))\n\tsignal_queue_data = signal_queue.get(block=True)\n\tassert signal_queue_data == signals.SIG_FINISH\n\taudio_stream.close()\n\tp.terminate()\n\tudp.close()\n\n\ndef udp_receive_audio(queue, signal_queue, dict_queue__incoming_frames):\n\tlogging.debug(\"About to start streaming UDP\")\n\tudp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\tudp.bind((server_config.SERVER_AUDIO_RECEIPT__ADDRESS, server_config.SERVER_AUDIO_RECEIPT__PORT))\n\twhile signal_queue.empty():\n\t\tsound_data, address = udp.recvfrom(audio_config.CHUNK * audio_config.CHANNELS * 2)\n\t\tif address not in dict_queue__incoming_frames.keys():\n\t\t\tdict_queue__incoming_frames[address] = Queue()\n\t\tdict_queue__incoming_frames[address].put(sound_data)\n\n\tsignal_queue_data = signal_queue.get(block=True)\n\tassert signal_queue_data == signals.SIG_FINISH\n\tudp.close()\n\n\n# frames_to_multicast replaces frames_to_play\ndef multicast_send_audio(queue, signal_queue, queue__frames_to_multicast):\n\tmulticast = (server_config.AUDIO_MULTICAST__ADDRESS, server_config.AUDIO_MULTICAST__PORT)\n\tsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t# sock.settimeout(server_config.AUDIO_MULTICAST__SENDER_TIMEOUT)\n\tsock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 1)\n\n\twhile signal_queue.empty():\n\t\tlogging.debug(\"Entered multicast send loop\")\n\t\tmulticast_data = queue__frames_to_multicast.get(block=True)\n\t\tlogging.debug(\"About to multicast raw data\")\n\t\tsock.sendto(multicast_data.raw_data, multicast)\n\t\tlogging.debug(\"Multicasted raw data\")\n\tsignal_queue_data = signal_queue.get(block=True)\n\tassert signal_queue_data == signals.SIG_FINISH\n\tsock.close()\n\n\ndef multicast_receive_audio(queue, signal_queue, queue__frames_to_play):\n\tlogging.debug(\"About to start receiving multicast\")\n\tmulticast = (server_config.AUDIO_MULTICAST__ADDRESS, server_config.AUDIO_MULTICAST__PORT)\n\treceiver_address = (server_config.AUDIO_MULTICAST__RECEIVER_ADDRESS, server_config.AUDIO_MULTICAST__RECEIVER_PORT)\n\n\tsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\tsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\tsock.bind(receiver_address)\n\n\tgroup = socket.inet_aton(server_config.AUDIO_MULTICAST__ADDRESS)\n\tmreq = struct.pack('=4sL', group, socket.INADDR_ANY)\n\tsock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)\n\tp = pyaudio.PyAudio()\n\twhile signal_queue.empty():\n\t\tlogging.debug(\"Entered multicast receipt loop\")\n\t\tsound_data, address = sock.recvfrom(audio_config.CHUNK * audio_config.CHANNELS * 2)\n\t\taudio_segment_data = AudioSegment(data=sound_data,\n\t\t\t\t\t\t\t\t\t\t sample_width=p.get_sample_size(format=audio_config.FORMAT),\n\t\t\t\t\t\t\t\t\t\t frame_rate=audio_config.RATE,\n\t\t\t\t\t\t\t\t\t\t channels=audio_config.CHANNELS)\n\t\tqueue__frames_to_play.put(audio_segment_data)\n\t\tlogging.debug(\"Wrote multicasted audio segment to queue\")\n\tp.terminate()\n\tsignal_queue_data = signal_queue.get(block=True)\n\tassert signal_queue_data == signals.SIG_FINISH\n\tsock.close()\n\n\ndef mix_audio(queue, signal_queue, dict_queue__incoming_frames, queue__frames_to_play, queue__frames_to_save):\n\tlogging.debug(\"About to mix audio streams\")\n\tp = pyaudio.PyAudio()\n\twhile signal_queue.empty():\n\t\tif 
len(dict_queue__incoming_frames) > 0:\n\t\t\tsliced_audio_segments = []\n\t\t\tincoming_stream_count = 0\n\t\t\tfor index in range(len(dict_queue__incoming_frames)):\n\t\t\t\tsliced_audio_segments.append(AudioSegment(\n\t\t\t\t\tdata=dict_queue__incoming_frames.values()[index].get(block=True),\n\t\t\t\t\tsample_width=p.get_sample_size(format=audio_config.FORMAT),\n\t\t\t\t\tframe_rate=audio_config.RATE,\n\t\t\t\t\tchannels=audio_config.CHANNELS))\n\t\t\t\tincoming_stream_count += 1\n\t\t\tmerged = sliced_audio_segments[0]\n\n\t\t\tfor index in range(1, incoming_stream_count):\n\t\t\t\tmerged = merged.overlay(sliced_audio_segments[index])\n\t\t\tqueue__frames_to_play.put(merged)\n\t\t\tqueue__frames_to_save.put(merged)\n\tp.terminate()\n\tsignal_data = signal_queue.get(block=True)\n\tassert signal_data == signals.SIG_FINISH\n\n\ndef play_audio(queue, signal_queue, queue__frames_to_play):\n\tlogging.debug(\"About to start playing audio\")\n\tp = pyaudio.PyAudio()\n\tstream = p.open(format=audio_config.FORMAT, channels=audio_config.CHANNELS,\n\t\t\t\t\trate=audio_config.RATE, output=True, frames_per_buffer=audio_config.CHUNK)\n\tstart_time = time.time()\n\tlast_purge_time = 0\n\twhile signal_queue.empty():\n\t\tif not queue__frames_to_play.empty():\n\t\t\talpha = queue__frames_to_play.get(block=True)\n\t\t\tstream.write(utils.decrypt(alpha.raw_data, utils.string__expanded_key), audio_config.CHUNK)\n\t\t\tpurge_time = int(time.time() - start_time)\n\t\t\tif purge_time % audio_config.PURGE_INTERVAL == 0 and purge_time != last_purge_time:\n\t\t\t\t# TODO Don't purge the entire queue. Maybe, about half-way.\n\t\t\t\tq_utils.clear_queue(queue__frames_to_play)\n\t\t\t\tlast_purge_time = purge_time\n\t\t\t# TODO Figure out when and where to decrypt the audio segments.\n\t\telse:\n\t\t\tstream.write(chr(128) * audio_config.CHUNK * 10, audio_config.CHUNK)\n\tsignal_queue_data = signal_queue.get(block=True)\n\tassert signal_queue_data == signals.SIG_FINISH\n\tq_utils.clear_queue(queue__frames_to_play)\n\tstream.close()\n\tp.terminate()\n", "repo_name": "asparsh29kumar/secure-audio-conferencing", "sub_path": "src/include/process/thread/stream_audio.py", "file_name": "stream_audio.py", "file_ext": "py", "file_size_in_byte": 6094, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "pyaudio.PyAudio", "line_number": 15, "usage_type": "call"}, {"api_name": "config.audio.FORMAT", "line_number": 16, "usage_type": "attribute"}, {"api_name": "config.audio", "line_number": 16, "usage_type": "name"}, {"api_name": "config.audio.CHANNELS", "line_number": 16, "usage_type": "attribute"}, {"api_name": "config.audio.RATE", "line_number": 16, "usage_type": "attribute"}, {"api_name": "config.audio.CHUNK", "line_number": 17, "usage_type": "attribute"}, {"api_name": "config.audio", "line_number": 17, "usage_type": "name"}, {"api_name": "socket.socket", "line_number": 18, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 18, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 18, "usage_type": "attribute"}, {"api_name": "config.audio.CHUNK", "line_number": 21, "usage_type": "attribute"}, {"api_name": "config.audio", "line_number": 21, "usage_type": "name"}, {"api_name": "encrypt.utils.encrypt", "line_number": 22, "usage_type": "call"}, {"api_name": "encrypt.utils", "line_number": 22, "usage_type": "name"}, {"api_name": "encrypt.utils.string__expanded_key", "line_number": 22, "usage_type": "attribute"}, 
{"api_name": "config.server.SERVER_AUDIO_RECEIPT__ADDRESS", "line_number": 26, "usage_type": "attribute"}, {"api_name": "config.server", "line_number": 26, "usage_type": "name"}, {"api_name": "config.server.SERVER_AUDIO_RECEIPT__PORT", "line_number": 26, "usage_type": "attribute"}, {"api_name": "queue.signals.SIG_FINISH", "line_number": 28, "usage_type": "attribute"}, {"api_name": "queue.signals", "line_number": 28, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 35, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 36, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 36, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 36, "usage_type": "attribute"}, {"api_name": "config.server.SERVER_AUDIO_RECEIPT__ADDRESS", "line_number": 37, "usage_type": "attribute"}, {"api_name": "config.server", "line_number": 37, "usage_type": "name"}, {"api_name": "config.server.SERVER_AUDIO_RECEIPT__PORT", "line_number": 37, "usage_type": "attribute"}, {"api_name": "config.audio.CHUNK", "line_number": 39, "usage_type": "attribute"}, {"api_name": "config.audio", "line_number": 39, "usage_type": "name"}, {"api_name": "config.audio.CHANNELS", "line_number": 39, "usage_type": "attribute"}, {"api_name": "Queue.Queue", "line_number": 41, "usage_type": "call"}, {"api_name": "queue.signals.SIG_FINISH", "line_number": 45, "usage_type": "attribute"}, {"api_name": "queue.signals", "line_number": 45, "usage_type": "name"}, {"api_name": "config.server.AUDIO_MULTICAST__ADDRESS", "line_number": 51, "usage_type": "attribute"}, {"api_name": "config.server", "line_number": 51, "usage_type": "name"}, {"api_name": "config.server.AUDIO_MULTICAST__PORT", "line_number": 51, "usage_type": "attribute"}, {"api_name": "socket.socket", "line_number": 52, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 52, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 52, "usage_type": "attribute"}, {"api_name": "socket.IPPROTO_IP", "line_number": 54, "usage_type": "attribute"}, {"api_name": "socket.IP_MULTICAST_TTL", "line_number": 54, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 57, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 59, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 61, "usage_type": "call"}, {"api_name": "queue.signals.SIG_FINISH", "line_number": 63, "usage_type": "attribute"}, {"api_name": "queue.signals", "line_number": 63, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 68, "usage_type": "call"}, {"api_name": "config.server.AUDIO_MULTICAST__ADDRESS", "line_number": 69, "usage_type": "attribute"}, {"api_name": "config.server", "line_number": 69, "usage_type": "name"}, {"api_name": "config.server.AUDIO_MULTICAST__PORT", "line_number": 69, "usage_type": "attribute"}, {"api_name": "config.server.AUDIO_MULTICAST__RECEIVER_ADDRESS", "line_number": 70, "usage_type": "attribute"}, {"api_name": "config.server", "line_number": 70, "usage_type": "name"}, {"api_name": "config.server.AUDIO_MULTICAST__RECEIVER_PORT", "line_number": 70, "usage_type": "attribute"}, {"api_name": "socket.socket", "line_number": 72, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 72, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 72, "usage_type": "attribute"}, {"api_name": "socket.SOL_SOCKET", "line_number": 73, "usage_type": "attribute"}, {"api_name": "socket.SO_REUSEADDR", "line_number": 73, 
"usage_type": "attribute"}, {"api_name": "socket.inet_aton", "line_number": 76, "usage_type": "call"}, {"api_name": "config.server.AUDIO_MULTICAST__ADDRESS", "line_number": 76, "usage_type": "attribute"}, {"api_name": "config.server", "line_number": 76, "usage_type": "name"}, {"api_name": "struct.pack", "line_number": 77, "usage_type": "call"}, {"api_name": "socket.INADDR_ANY", "line_number": 77, "usage_type": "attribute"}, {"api_name": "socket.IPPROTO_IP", "line_number": 78, "usage_type": "attribute"}, {"api_name": "socket.IP_ADD_MEMBERSHIP", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pyaudio.PyAudio", "line_number": 79, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 81, "usage_type": "call"}, {"api_name": "config.audio.CHUNK", "line_number": 82, "usage_type": "attribute"}, {"api_name": "config.audio", "line_number": 82, "usage_type": "name"}, {"api_name": "config.audio.CHANNELS", "line_number": 82, "usage_type": "attribute"}, {"api_name": "pydub.AudioSegment", "line_number": 83, "usage_type": "call"}, {"api_name": "config.audio.FORMAT", "line_number": 84, "usage_type": "attribute"}, {"api_name": "config.audio", "line_number": 84, "usage_type": "name"}, {"api_name": "config.audio.RATE", "line_number": 85, "usage_type": "attribute"}, {"api_name": "config.audio", "line_number": 85, "usage_type": "name"}, {"api_name": "config.audio.CHANNELS", "line_number": 86, "usage_type": "attribute"}, {"api_name": "config.audio", "line_number": 86, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 88, "usage_type": "call"}, {"api_name": "queue.signals.SIG_FINISH", "line_number": 91, "usage_type": "attribute"}, {"api_name": "queue.signals", "line_number": 91, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 96, "usage_type": "call"}, {"api_name": "pyaudio.PyAudio", "line_number": 97, "usage_type": "call"}, {"api_name": "pydub.AudioSegment", "line_number": 103, "usage_type": "call"}, {"api_name": "config.audio.FORMAT", "line_number": 105, "usage_type": "attribute"}, {"api_name": "config.audio", "line_number": 105, "usage_type": "name"}, {"api_name": "config.audio.RATE", "line_number": 106, "usage_type": "attribute"}, {"api_name": "config.audio", "line_number": 106, "usage_type": "name"}, {"api_name": "config.audio.CHANNELS", "line_number": 107, "usage_type": "attribute"}, {"api_name": "config.audio", "line_number": 107, "usage_type": "name"}, {"api_name": "queue.signals.SIG_FINISH", "line_number": 117, "usage_type": "attribute"}, {"api_name": "queue.signals", "line_number": 117, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 121, "usage_type": "call"}, {"api_name": "pyaudio.PyAudio", "line_number": 122, "usage_type": "call"}, {"api_name": "config.audio.FORMAT", "line_number": 123, "usage_type": "attribute"}, {"api_name": "config.audio", "line_number": 123, "usage_type": "name"}, {"api_name": "config.audio.CHANNELS", "line_number": 123, "usage_type": "attribute"}, {"api_name": "config.audio.RATE", "line_number": 124, "usage_type": "attribute"}, {"api_name": "config.audio", "line_number": 124, "usage_type": "name"}, {"api_name": "config.audio.CHUNK", "line_number": 124, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 125, "usage_type": "call"}, {"api_name": "encrypt.utils.decrypt", "line_number": 130, "usage_type": "call"}, {"api_name": "encrypt.utils", "line_number": 130, "usage_type": "name"}, {"api_name": "encrypt.utils.string__expanded_key", "line_number": 130, "usage_type": "attribute"}, 
{"api_name": "config.audio.CHUNK", "line_number": 130, "usage_type": "attribute"}, {"api_name": "config.audio", "line_number": 130, "usage_type": "name"}, {"api_name": "time.time", "line_number": 131, "usage_type": "call"}, {"api_name": "config.audio.PURGE_INTERVAL", "line_number": 132, "usage_type": "attribute"}, {"api_name": "config.audio", "line_number": 132, "usage_type": "name"}, {"api_name": "queue.utils.clear_queue", "line_number": 134, "usage_type": "call"}, {"api_name": "queue.utils", "line_number": 134, "usage_type": "name"}, {"api_name": "config.audio.CHUNK", "line_number": 138, "usage_type": "attribute"}, {"api_name": "config.audio", "line_number": 138, "usage_type": "name"}, {"api_name": "queue.signals.SIG_FINISH", "line_number": 140, "usage_type": "attribute"}, {"api_name": "queue.signals", "line_number": 140, "usage_type": "name"}, {"api_name": "queue.utils.clear_queue", "line_number": 141, "usage_type": "call"}, {"api_name": "queue.utils", "line_number": 141, "usage_type": "name"}]} +{"seq_id": "43379575537", "text": "'''\nThis python script is to extract the content of those jsonList stored in directory \"conversation_jsons\"\nThe format of those jsonLists have been discribed in webScraping.py\nOne jsonList corresponds to a page in the catalogue of sharegpt\n\nAfter preprossesing, the tokenized conversation will be stored in directory \"tokenized_answers\"\n(Currently, we only want to get the content of tokenized answers)\n\n'''\nimport json\nimport os\nimport numpy as np\nfrom transformers import GPT2TokenizerFast\ntokenizer = GPT2TokenizerFast.from_pretrained(\"gpt2\")\n\n# the range of pages we want to tokenize\nstart_pages = 100\nend_pages = 200\n\n# settings of directory\njlists_file_dir = \"./conversation_jsons\"\njlists_filename_prefix = \"page_\"\nsaving_file_path = \"./tokenized_answers/answers_%d_%d\"%(start_pages, end_pages)\n\nconversation_set = set()\n\n### Tokenize each answer\ntokenized_answers = []\n# iterate each json list and extract its answers\nfor i in range(start_pages, end_pages+1):\n jlist_file_path = os.path.join(jlists_file_dir,jlists_filename_prefix + str(i))\n jsonlist = []\n with open(jlist_file_path, 'r') as f:\n jsonlist = json.load(f)\n \n for conversation_js in jsonlist:\n # there is a duplicate conversation, skip it\n if conversation_js['url'] in conversation_set:\n print(\"there is duplicate url, skip it\")\n continue\n \n conversation_set.add(conversation_js['url'])\n answers = conversation_js['a']\n for a in answers:\n tokenized_answers.append(tokenizer(a)['input_ids'])\n\nprint(\"Tokenization Complete the amount of total answers: \" + str(len(conversation_set)))\n\nshort_answers_amount = 0\nzero_answers_amount = 0\nfor tokens in tokenized_answers:\n # remove all the blank\n tokens = [i for i in tokens if i != 220]\n if len(tokens)<64:\n short_answers_amount += 1\n if len(tokens) == 0: \n zero_answers_amount += 1\n\nprint(\"zero_answers_amount\" + str(zero_answers_amount))\nprint(\"short_answers_amount\" + str(short_answers_amount))\nprint(\"total answers amount\" + str(len(tokenized_answers)))\n\n### Write tokenized Dataset to the binary file\nsaving_file = open(saving_file_path,'wb')\nfor tokens in tokenized_answers:\n tokens_len =int(len(tokens))\n # write its length\n saving_file.write(tokens_len.to_bytes(4,byteorder='little',signed=True))\n # write the list of tokens\n saving_file.write(np.array(tokens,dtype=np.uint32).tobytes())\n\n# close the file writing stream\nsaving_file.close()\nprint(saving_file_path + \"written\")", 
"repo_name": "pzcddm/ChatGptInvestigation", "sub_path": "WebScraping_ShareGpt/preprocessConversations.py", "file_name": "preprocessConversations.py", "file_ext": "py", "file_size_in_byte": 2536, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "transformers.GPT2TokenizerFast.from_pretrained", "line_number": 14, "usage_type": "call"}, {"api_name": "transformers.GPT2TokenizerFast", "line_number": 14, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.uint32", "line_number": 70, "usage_type": "attribute"}]} +{"seq_id": "39771728790", "text": "import os\r\nfrom dotenv import load_dotenv\r\nfrom openkey import openai_api_key\r\nimport openai\r\nfrom openai import api_key, Completion\r\nimport requests\r\nimport json\r\n\r\n\r\n# Utiliser la clé API OpenAI\r\nopenai.api_key = openai_api_key\r\n\r\n# Mapping des langages de programmation aux IDs des moteurs de génération de code correspondants\r\nENGINE_IDS = {\r\n \"python\": \"davinci-codex\",\r\n \"java\": \"davinci-codex-java-002\",\r\n \"javascript\": \"davinci-codex-javascript-002\",\r\n \"html\": \"davinci-codex-html-002\",\r\n \"css\": \"davinci-codex-css-002\",\r\n \"c\": \"davinci-codex-c-002\",\r\n \"cpp\": \"davinci-codex-cpp-002\",\r\n \"matlab\": \"davinci-codex-matlab-002\",\r\n \"scilab\": \"davinci-codex-scilab-002\",\r\n \"sql\": \"davinci-codex-sql-002\",\r\n \"mongodb\": \"davinci-codex-mongodb-002\",\r\n \"hadoop\": \"davinci-codex-hadoop-002\"\r\n}\r\n\r\n# Fonction pour générer du code à partir d'un prompt et d'un langage de programmation\r\ndef generate_code(prompt, language):\r\n # Appel de l'API OpenAI pour générer du code\r\n response = Completion.create(\r\n engine=ENGINE_IDS[language.lower()],\r\n prompt=prompt,\r\n max_tokens=1024,\r\n n=1,\r\n stop=None,\r\n temperature=0.5,\r\n )\r\n\r\n # Extraction du code généré de la réponse de l'API\r\n generated_code = response.choices[0].text.strip()\r\n\r\n return generated_code\r\n\r\n# Demander à l'utilisateur une question liée à la programmation et le langage de programmation souhaité\r\nquestion = input(\"Posez votre question liée à la programmation : \")\r\nlanguage = input(\"Dans quelle langage souhaitez-vous générer du code ? 
(Python, Java, JavaScript, HTML, CSS, C, C++, Matlab, Scilab, SQL, MongoDB, Hadoop) : \")\r\n\r\n# Exemples d'entrée et de sortie pour la question posée\r\ninput_examples = [\r\n \"Comment trier une liste en Python ?\",\r\n \"Comment ajouter un élément à une liste en Python ?\",\r\n \"Comment trouver la somme de deux nombres en Python ?\",\r\n \"Comment lire un fichier en Python ?\",\r\n \"Comment créer une classe en Python ?\"\r\n]\r\n\r\noutput_examples = [\r\n \"sorted_list = sorted(my_list)\",\r\n \"my_list.append(new_element)\",\r\n \"sum = num1 + num2\",\r\n \"with open('filename', 'r') as file:\",\r\n \"class MyClass:\\n def __init__(self):\\n pass\"\r\n]\r\n\r\n# Construire le prompt pour la génération de code à partir des exemples d'entrée et de sortie\r\nprompt = f\"Generer du code {language} pour : {question}\\n\\nExemples:\\n\"\r\n\r\nfor input_example, output_example in zip(input_examples, output_examples):\r\n prompt += f\"Input : {input_example}\\nOutput : {output_example}\\n\"\r\n\r\n# Appel de la fonction pour générer du code\r\ngenerated_code = generate_code(prompt, language)\r\n\r\n# Affichage du code généré\r\nprint(generated_code)\r\n", "repo_name": "Elzoghost/Chat_Bot_IA", "sub_path": "chat_code_am.py", "file_name": "chat_code_am.py", "file_ext": "py", "file_size_in_byte": 2751, "program_lang": "python", "lang": "fr", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "openai.api_key", "line_number": 11, "usage_type": "attribute"}, {"api_name": "openkey.openai_api_key", "line_number": 11, "usage_type": "name"}, {"api_name": "openai.Completion.create", "line_number": 32, "usage_type": "call"}, {"api_name": "openai.Completion", "line_number": 32, "usage_type": "name"}]} +{"seq_id": "20181467688", "text": "from fastapi import Header\n\nfrom src.exceptions.invalid_token import InvalidTokenException\n\n\nclass HeaderTokenExtractor:\n def __init__(self, authorization: str = Header()) -> None:\n self.value = authorization\n\n async def __call__(self) -> str:\n v = self.value.lower()\n if 'bearer' not in v:\n raise InvalidTokenException()\n return self.value.split()[1]\n", "repo_name": "KaiserProger/kalina_backend", "sub_path": "src/auth/header_extract.py", "file_name": "header_extract.py", "file_ext": "py", "file_size_in_byte": 397, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "42", "api": [{"api_name": "fastapi.Header", "line_number": 7, "usage_type": "call"}, {"api_name": "src.exceptions.invalid_token.InvalidTokenException", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "20192294710", "text": "from setuptools import setup\nimport setup_compiler, setup_pyjamas, setup_pyjd\n__VERSION__='0.8.1'\n\n\nsetup(\n name=\"pyjs\",\n version=__VERSION__,\n packages=setup_compiler.packages + \n setup_pyjamas.packages + \n setup_pyjd.packages,\n package_dir = dict(setup_compiler.package_dir.items()+\n setup_pyjamas.package_dir.items()+\n setup_pyjd.package_dir.items()),\n package_data = dict(setup_compiler.package_data.items()+\n setup_pyjamas.package_data.items()+\n setup_pyjd.package_data.items()),\n install_requires = setup_compiler.install_requires + \n setup_pyjamas.install_requires + \n setup_pyjd.install_requires,\n entry_points = dict(setup_compiler.entry_points.items()+\n setup_pyjamas.entry_points.items()+\n setup_pyjd.entry_points.items()),\n zip_safe = False,\n )\n\n\n", "repo_name": "colintoast/pyjs", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", 
"file_size_in_byte": 990, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "33", "api": [{"api_name": "setuptools.setup", "line_number": 6, "usage_type": "call"}, {"api_name": "setup_compiler.packages", "line_number": 9, "usage_type": "attribute"}, {"api_name": "setup_pyjamas.packages", "line_number": 10, "usage_type": "attribute"}, {"api_name": "setup_pyjd.packages", "line_number": 11, "usage_type": "attribute"}, {"api_name": "setup_compiler.package_dir.items", "line_number": 12, "usage_type": "call"}, {"api_name": "setup_compiler.package_dir", "line_number": 12, "usage_type": "attribute"}, {"api_name": "setup_pyjamas.package_dir.items", "line_number": 13, "usage_type": "call"}, {"api_name": "setup_pyjamas.package_dir", "line_number": 13, "usage_type": "attribute"}, {"api_name": "setup_pyjd.package_dir.items", "line_number": 14, "usage_type": "call"}, {"api_name": "setup_pyjd.package_dir", "line_number": 14, "usage_type": "attribute"}, {"api_name": "setup_compiler.package_data.items", "line_number": 15, "usage_type": "call"}, {"api_name": "setup_compiler.package_data", "line_number": 15, "usage_type": "attribute"}, {"api_name": "setup_pyjamas.package_data.items", "line_number": 16, "usage_type": "call"}, {"api_name": "setup_pyjamas.package_data", "line_number": 16, "usage_type": "attribute"}, {"api_name": "setup_pyjd.package_data.items", "line_number": 17, "usage_type": "call"}, {"api_name": "setup_pyjd.package_data", "line_number": 17, "usage_type": "attribute"}, {"api_name": "setup_compiler.install_requires", "line_number": 18, "usage_type": "attribute"}, {"api_name": "setup_pyjamas.install_requires", "line_number": 19, "usage_type": "attribute"}, {"api_name": "setup_pyjd.install_requires", "line_number": 20, "usage_type": "attribute"}, {"api_name": "setup_compiler.entry_points.items", "line_number": 21, "usage_type": "call"}, {"api_name": "setup_compiler.entry_points", "line_number": 21, "usage_type": "attribute"}, {"api_name": "setup_pyjamas.entry_points.items", "line_number": 22, "usage_type": "call"}, {"api_name": "setup_pyjamas.entry_points", "line_number": 22, "usage_type": "attribute"}, {"api_name": "setup_pyjd.entry_points.items", "line_number": 23, "usage_type": "call"}, {"api_name": "setup_pyjd.entry_points", "line_number": 23, "usage_type": "attribute"}]} +{"seq_id": "31719886121", "text": "__copyright__ = \"Copyright (C) 2009-2013 Andreas Kloeckner\"\n\n__license__ = \"\"\"\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\nfrom functools import reduce\n\nimport numpy as np\n\nfrom modepy.quadrature import Quadrature\n\n\ndef _extended_euclidean(q, r):\n \"\"\"Return a tuple (p, a, b) such that p = aq + br,\n where p is the greatest common divisor.\n \"\"\"\n\n # see [Davenport], Appendix, p. 214\n\n if abs(q) < abs(r):\n p, a, b = _extended_euclidean(r, q)\n return p, b, a\n\n Q = 1, 0 # noqa: N806\n R = 0, 1 # noqa: N806\n\n while r:\n quot, t = divmod(q, r)\n T = Q[0] - quot*R[0], Q[1] - quot*R[1] # noqa: N806\n q, r = r, t\n Q, R = R, T # noqa: N806\n\n return q, Q[0], Q[1]\n\n\ndef _gcd(q, r):\n return _extended_euclidean(q, r)[0]\n\n\ndef _simplify_fraction(xxx_todo_changeme):\n (a, b) = xxx_todo_changeme\n gcd = _gcd(a, b)\n return (a//gcd, b//gcd)\n\n\nclass GrundmannMoellerSimplexQuadrature(Quadrature):\n r\"\"\"Cubature on an *n*-simplex.\n\n This cubature rule has both negative and positive weights.\n It is exact for polynomials up to order :math:`2s + 1`, where\n :math:`s` is given as *order*.\n\n The integration domain is the unit simplex. (see :ref:`tri-coords`\n and :ref:`tet-coords`)\n\n .. attribute:: exact_to\n\n The total degree up to which the quadrature is exact.\n\n See\n\n * A. Grundmann and H.M. Moeller,\n Invariant integration formulas for the n-simplex by combinatorial methods,\n SIAM J. Numer. Anal. 15 (1978), 282--290.\n http://dx.doi.org/10.1137/0715019\n\n .. automethod:: __init__\n .. automethod:: __call__\n \"\"\"\n\n # FIXME: most other functionality in modepy uses 'dims, order' as the\n # argument order convention.\n def __init__(self, order, dimension):\n \"\"\"\n :arg order: A parameter correlated with the total degree of polynomials\n that are integrated exactly. 
(See also :attr:`exact_to`.)\n :arg dimension: The number of dimensions for the quadrature rule.\n Any positive integer.\n \"\"\"\n s = order\n n = dimension\n d = 2*s + 1\n\n if dimension == 0:\n nodes = np.zeros((dimension, 1))\n weights = np.ones(1)\n\n Quadrature.__init__(self, nodes, weights)\n return\n\n import math\n\n from pytools import (\n generate_decreasing_nonnegative_tuples_summing_to,\n generate_unique_permutations, wandering_element)\n\n points_to_weights = {}\n\n for i in range(s + 1):\n weight = (-1)**i * 2**(-2*s) \\\n * (d + n - 2*i)**d \\\n / math.factorial(i) \\\n / math.factorial(d + n - i)\n\n for t in generate_decreasing_nonnegative_tuples_summing_to(s - i, n + 1):\n for beta in generate_unique_permutations(t):\n denominator = d + n - 2*i\n point = tuple(\n _simplify_fraction((2*beta_i + 1, denominator))\n for beta_i in beta)\n\n points_to_weights[point] = \\\n points_to_weights.get(point, 0) + weight\n\n from operator import add\n\n vertices = ([-1 * np.ones((n,))]\n + [np.array(x)\n for x in wandering_element(n, landscape=-1, wanderer=1)])\n\n nodes = []\n weights = []\n\n dim_factor = 2**n\n for p, w in points_to_weights.items():\n real_p = reduce(add, (a/b * v for (a, b), v in zip(p, vertices)))\n nodes.append(real_p)\n weights.append(dim_factor * w)\n\n super().__init__(np.array(nodes).T, np.array(weights), exact_to=d)\n", "repo_name": "inducer/modepy", "sub_path": "modepy/quadrature/grundmann_moeller.py", "file_name": "grundmann_moeller.py", "file_ext": "py", "file_size_in_byte": 4716, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 12, "dataset": "github-code", "pt": "33", "api": [{"api_name": "modepy.quadrature.Quadrature", "line_number": 63, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 103, "usage_type": "call"}, {"api_name": "modepy.quadrature.Quadrature.__init__", "line_number": 105, "usage_type": "call"}, {"api_name": "modepy.quadrature.Quadrature", "line_number": 105, "usage_type": "name"}, {"api_name": "math.factorial", "line_number": 119, "usage_type": "call"}, {"api_name": "math.factorial", "line_number": 120, "usage_type": "call"}, {"api_name": "pytools.generate_decreasing_nonnegative_tuples_summing_to", "line_number": 122, "usage_type": "call"}, {"api_name": "pytools.generate_unique_permutations", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 135, "usage_type": "call"}, {"api_name": "pytools.wandering_element", "line_number": 136, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 143, "usage_type": "call"}, {"api_name": "operator.add", "line_number": 143, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 147, "usage_type": "call"}]} +{"seq_id": "73301352094", "text": "import torch\n\n\ndef _safe_det_3x3(t: torch.Tensor):\n \"\"\"\n Fast determinant calculation for a batch of 3x3 matrices.\n\n Note, result of this function might not be the same as `torch.det()`.\n The differences might be in the last significant digit.\n\n Args:\n t: Tensor of shape (N, 3, 3).\n\n Returns:\n Tensor of shape (N) with determinants.\n \"\"\"\n\n det = (\n t[..., 0, 0] * (t[..., 1, 1] * t[..., 2, 2] - t[..., 1, 2] * t[..., 2, 1])\n - t[..., 0, 1] * (t[..., 1, 0] * t[..., 2, 2] - t[..., 2, 0] * t[..., 1, 2])\n + t[..., 0, 2] * (t[..., 1, 0] * t[..., 2, 1] - t[..., 2, 0] * t[..., 1, 1])\n )\n\n return det\n", 
"repo_name": "facebookresearch/pytorch3d", "sub_path": "pytorch3d/common/workaround/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 662, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7843, "dataset": "github-code", "pt": "33", "api": [{"api_name": "torch.Tensor", "line_number": 4, "usage_type": "attribute"}]} +{"seq_id": "15361662555", "text": "from typing import List\nfrom sklearn.base import BaseEstimator, TransformerMixin\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import MinMaxScaler\n\n\nclass Deg2Rad(BaseEstimator, TransformerMixin):\n def __init__(self, columns: List[str] = None):\n self.columns = columns\n\n def fit(self, X, y=None):\n\n if self.columns is None:\n self.columns = X.columns\n\n return self\n\n def transform(self, X, y=None):\n\n X_var = X[self.columns].values\n\n X_var = np.deg2rad(X_var)\n\n X = pd.DataFrame(X_var, columns=self.columns)\n\n return X\n\n def inverse_transform(self, X, y=None):\n\n X_var = X[self.columns].values\n\n X_var = np.rad2deg(X_var)\n\n X = pd.DataFrame(X_var, columns=self.columns)\n\n return X\n\n\nclass FixedScaler(BaseEstimator, TransformerMixin):\n def __init__(self, scale: np.ndarray = 1.0, columns: List[str] = None):\n self.columns = columns\n self.scale = scale\n\n def fit(self, X, y=None):\n if self.columns is None:\n self.columns = X.columns\n\n return self\n\n def transform(self, X, y=None):\n X_var = X[self.columns].values\n\n X_var *= self.scale\n\n X = pd.DataFrame(X_var, columns=self.columns)\n\n return X\n\n def inverse_transform(self, X, y=None):\n X_var = X[self.columns].values\n\n X_var /= self.scale\n\n X = pd.DataFrame(X_var, columns=self.columns)\n\n return X\n\n\nclass MinMaxFixedScaler(BaseEstimator, TransformerMixin):\n def __init__(\n self, min_val: np.ndarray, max_val: np.ndarray, columns: List[str] = None\n ):\n self.columns = columns\n self.min_val = np.asarray(min_val)\n self.max_val = np.asarray(max_val)\n\n def fit(self, X, y=None):\n if self.columns is None:\n self.columns = X.columns\n\n return self\n\n def transform(self, X, y=None):\n\n X_var = X[self.columns].values\n\n X_var = (X_var - self.min_val) / (self.max_val - self.min_val)\n\n X = pd.DataFrame(X_var, columns=self.columns)\n\n return X\n\n def inverse_transform(self, X, y=None):\n\n X_var = X[self.columns].values\n\n X_var = X_var * (self.max_val - self.min_val) + self.min_val\n\n X = pd.DataFrame(X_var, columns=self.columns)\n\n return X\n\n\nclass MinMaxDF(BaseEstimator, TransformerMixin):\n def __init__(\n self, columns: List[str] = None, min_val: float = -1, max_val: float = 1\n ):\n self.columns = columns\n self.min_val = min_val\n self.max_val = max_val\n\n def fit(self, X: pd.DataFrame, y=None):\n\n self.transformer = MinMaxScaler((self.min_val, self.max_val))\n\n if self.columns is None:\n self.columns = X.columns\n\n X_var = X[self.columns].values\n\n self.transformer.fit(X_var)\n\n return self\n\n def transform(self, X: pd.DataFrame, y=None):\n\n # print(f\"\\n\\nType: \", type(X), X.columns)\n\n X_var = X[self.columns].values\n # X_std = (X - self.min_val) / (self.max_val - self.min_val)\n # X_var = X_std * (self.min_val\n\n X_var = self.transformer.transform(X_var)\n\n # print(f\"\\n\\nSHAPE: {X_var.shape}\\n\\n\")\n # print(f\"\\nDF: {X.shape}\\n\\n\")\n # print(f\"\\nCOLUMNS: {self.columns}\\n\\n\")\n # print(f\"\\nX: {X[self.columns].shape}\\n\\n\")\n # X[self.columns].data = X_var\n # print(f\"DONE!\")\n X = pd.DataFrame(data=X_var, columns=self.columns)\n\n 
return X\n\n def inverse_transform(self, X: pd.DataFrame, y=None):\n\n X_var = X[self.columns].values\n\n X_var = self.transformer.inverse_transform(X_var)\n\n X[self.columns] = X_var\n\n return X\n", "repo_name": "jejjohnson/jejeqx", "sub_path": "jejeqx/_src/transforms/dataframe/scaling.py", "file_name": "scaling.py", "file_ext": "py", "file_size_in_byte": 3712, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "33", "api": [{"api_name": "sklearn.base.BaseEstimator", "line_number": 8, "usage_type": "name"}, {"api_name": "sklearn.base.TransformerMixin", "line_number": 8, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 9, "usage_type": "name"}, {"api_name": "numpy.deg2rad", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.rad2deg", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 35, "usage_type": "call"}, {"api_name": "sklearn.base.BaseEstimator", "line_number": 40, "usage_type": "name"}, {"api_name": "sklearn.base.TransformerMixin", "line_number": 40, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 41, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 41, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 56, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 65, "usage_type": "call"}, {"api_name": "sklearn.base.BaseEstimator", "line_number": 70, "usage_type": "name"}, {"api_name": "sklearn.base.TransformerMixin", "line_number": 70, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 72, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 72, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 76, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 90, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 100, "usage_type": "call"}, {"api_name": "sklearn.base.BaseEstimator", "line_number": 105, "usage_type": "name"}, {"api_name": "sklearn.base.TransformerMixin", "line_number": 105, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 107, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 113, "usage_type": "attribute"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 115, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 126, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 142, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 146, "usage_type": "attribute"}]} +{"seq_id": "16958616122", "text": "import logging\nimport os.path\n\nfrom lxml import etree\n\nlog = logging.getLogger(__name__)\n\nICML_RNG = os.path.join(os.path.dirname(__file__), 'externals', 'idml_schema', 'IDMarkupLanguage.rng')\n\ndef validate(icml):\n \"\"\"Validate the supplied ICML (etree) document against a default IDML schema. Returns boolean. 
Writes warnings\n to this class's log stream.\"\"\"\n validator = etree.RelaxNG(etree.parse(ICML_RNG))\n valid = validator.validate(icml)\n if valid:\n return True\n else:\n error_log = validator.error_log\n log.warning('Validation errors:\\n%s' % error_log)\n return False\n\n\n\n\n", "repo_name": "jaumeortola/ickmull", "sub_path": "ickmull/icml.py", "file_name": "icml.py", "file_ext": "py", "file_size_in_byte": 622, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "33", "api": [{"api_name": "logging.getLogger", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 8, "usage_type": "name"}, {"api_name": "os.path.path.dirname", "line_number": 8, "usage_type": "call"}, {"api_name": "lxml.etree.RelaxNG", "line_number": 13, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 13, "usage_type": "name"}, {"api_name": "lxml.etree.parse", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "41559914186", "text": "import sys\n\nimport joblib\nimport numpy as np\nimport pandas as pd\nimport pmdarima as pm\nfrom matplotlib import pyplot as plt\n\nfrom utils import _add_error_xml, _add_info_xml, _read_parameters\n\n\ndef read_data(path, feature=None, target=[0]):\n dataframe = pd.read_csv(path, engine='python')\n dataset = dataframe.values\n columns = dataframe.columns\n # convert the integer values to float\n dataset = dataset.astype('float32')\n if feature is None:\n feature_data = None\n feature_columns = None\n else:\n feature_data = dataset[:, feature]\n feature_columns = columns[feature]\n target_data = dataset[:, target]\n target_columns = columns[target]\n return feature_data, target_data, feature_columns, target_columns\n\n\ndef arima_fit(feature_data, target_data,\n seasonal=False, m=0, information_criterion='bic',\n start_p=2, d=None, start_q=2, max_p=5, max_d=2, max_q=5, ):\n if feature_data is not None and len(feature_data) != len(target_data):\n raise TypeError(\"the input features and the target have different numbers of rows\")\n arima = pm.auto_arima(target_data, X=feature_data, error_action='ignore', trace=True,\n suppress_warnings=True, maxiter=5,\n seasonal=seasonal, m=m, information_criterion=information_criterion, start_p=start_p, d=d,\n start_q=start_q, max_p=max_p, max_d=max_d, max_q=max_q, return_valid_fits=True)\n pickle_tgt = \"arima.pkl\"\n joblib.dump(arima[0], pickle_tgt)\n return arima\n\n\ndef arima_predict(model_path, file_path=None, n_periods=3):\n arima = joblib.load(model_path)\n if file_path is not None:\n dataframe = pd.read_csv(file_path, engine='python')\n dataset = dataframe.values\n dataset = dataset.astype('float32')\n return arima.predict(X=dataset, n_periods=len(dataset))\n return arima.predict(n_periods=n_periods)\n\n\ndef arima_predict_insample(arima, X=None):\n return arima.predict_in_sample(X)\n\n\ndef arima(path, feature=None, target=[0], model_path=\"arima.pkl\", file_path=None, n_periods=3, seasonal=False, m=0, information_criterion='bic', start_p=2, d=None,\n start_q=2, max_p=5, max_d=2, max_q=5):\n feature_data, target_data, feature_columns, target_columns = read_data(\n path, feature=feature, target=target)\n arimas = arima_fit(feature_data, target_data,\n seasonal=seasonal, m=m, information_criterion=information_criterion,\n start_p=start_p, d=d, start_q=start_q, max_p=max_p, max_d=max_d, max_q=max_q)\n draw_bic_or_aic(arimas, information_criterion, seasonal=False)\n if
feature_data is None:\n y_predict = arima_predict(\n model_path, file_path=None, n_periods=n_periods)\n draw_predict(arimas[0], target_data, y_predict, feature=None)\n else:\n y_predict = arima_predict(\n model_path, file_path=file_path, n_periods=n_periods)\n draw_predict(arimas[0], target_data, y_predict, feature=feature_data)\n result_data = pd.read_csv(file_path)\n result_data['prediction'] = y_predict\n result_data.to_csv('result.csv', index=False)\n return result_data\n\n\ndef draw_bic_or_aic(arimas, information_criterion='bic', seasonal=False):\n x = []\n y = []\n plt.rcParams['font.sans-serif'] = ['SimHei']\n plt.rcParams['axes.unicode_minus'] = False\n if information_criterion == 'bic':\n plt.title('bic results for each model searched by grid traversal')\n elif information_criterion == 'aic':\n plt.title('aic results for each model searched by grid traversal')\n else:\n raise ValueError('information_criterion only accepts the values aic and bic')\n for arima in arimas:\n arima_dict = arima.to_dict()\n x_item = arima_dict['order'].__str__()\n if seasonal:\n x_item = x_item + arima_dict['seasonal_order'].__str__()\n if x_item not in x:\n x.append(x_item)\n if information_criterion == 'bic':\n y.append(arima_dict['bic'])\n elif information_criterion == 'aic':\n y.append(arima_dict['aic'])\n else:\n index = x.index(x_item)\n if information_criterion == 'bic' and arima_dict['bic'] < y[index]:\n y[index] = arima_dict['bic']\n elif information_criterion == 'aic' and arima_dict['aic'] < y[index]:\n y[index] = arima_dict['aic']\n\n plt.barh(x, y) # draw a horizontal bar chart with barh\n plt.savefig(f'{information_criterion}.jpg')\n plt.clf()\n\n\ndef draw_predict(arima, y, y_predict, feature=None):\n cur_predict = arima_predict_insample(arima, feature)\n x = np.arange(len(cur_predict))\n plt.scatter(x, y, marker='x')\n plt.plot(x, cur_predict)\n x = []\n\n for i in range(len(y_predict)):\n x.append(len(cur_predict) + i)\n plt.plot(x, y_predict)\n\n plt.title('Actual test samples vs.
forecasts')\n plt.savefig(\"result.jpg\")\n plt.clf()\n\n\ndef main():\n try:\n parameters = _read_parameters()\n except Exception as e:\n _add_error_xml(\"Parameters Error\", str(e))\n with open('log.txt', 'a') as fp:\n fp.write('error\\n')\n return\n try:\n result = arima(parameters['trainCSV'], feature=parameters['features'], target=parameters['target'],\n file_path=parameters['testCSV'], seasonal=parameters['seasonal'],\n m=parameters['m'], information_criterion=parameters['information_criterion'],\n start_p=parameters['start_p'], d=parameters['d'],\n start_q=parameters['start_q'], max_p=parameters['max_p'],\n max_d=parameters['max_d'], max_q=parameters['max_q'])\n except Exception as e:\n _add_error_xml(\"arima Error\", str(e))\n with open('log.txt', 'a') as fp:\n fp.write('error\\n')\n return \n try:\n _add_info_xml(picture_names=['result.jpg',f\"{parameters['information_criterion']}.jpg\"], result=result)\n except Exception as e:\n _add_error_xml(\"XML Error\", str(e))\n with open('log.txt', 'a') as fp:\n fp.write('error\\n')\n return\n \n with open('log.txt', 'a') as fp:\n fp.write('finish\\n')\n\n\n\nif __name__ == '__main__':\n console = sys.stdout\n file = open(\"task_log.txt\", \"w\")\n sys.stdout = file\n main()\n sys.stdout = console\n file.close()\n", "repo_name": "yuyouyu32/106Lab_Project_Dep", "sub_path": "arima/workdir/arima.py", "file_name": "arima.py", "file_ext": "py", "file_size_in_byte": 6385, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "33", "api": [{"api_name": "pandas.read_csv", "line_number": 13, "usage_type": "call"}, {"api_name": "pmdarima.auto_arima", "line_number": 34, "usage_type": "call"}, {"api_name": "joblib.dump", "line_number": 39, "usage_type": "call"}, {"api_name": "joblib.load", "line_number": 44, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 46, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 82, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 83, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.barh", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot",
"line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "utils._read_parameters", "line_number": 131, "usage_type": "call"}, {"api_name": "utils._add_error_xml", "line_number": 133, "usage_type": "call"}, {"api_name": "utils._add_error_xml", "line_number": 145, "usage_type": "call"}, {"api_name": "utils._add_info_xml", "line_number": 150, "usage_type": "call"}, {"api_name": "utils._add_error_xml", "line_number": 152, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 163, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 165, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 167, "usage_type": "attribute"}]} +{"seq_id": "34326200393", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport random as rd\r\nim1=plt.imread('8_groix_thirdArrowProcessing.bmp')\r\nim2=np.copy(im1)\r\nim3=plt.imread('3_groix_thirdProcessing.bmp')\r\nim4=np.copy(im3)\r\n\r\ndeplacement=[(0,0),(-4,1),(-4,2),(-4,3),(-4,4),(-3,4),(-2,4),(-1,4),(0,4),(1,4),(2,4),(3,4),(4,4),(4,3),(4,2),(4,1),(4,0),(4,-1),(4,-2),(4,-3),(4,-4),(3,-4),(2,-4),(1,-4),(0,-4),(-1,-4),(-2,-4),(-3,-4),(-4,-4),(-4,-3),(-4,-2),(-4,-1)]\r\n\r\ntableau=np.zeros((im2.shape[0],im2.shape[1]),dtype=int)\r\n\r\nfor k in range(im2.shape[0]):\r\n for l in range(im2.shape[1]):\r\n if list(im2[k,l,:])!=[185,122,8]:\r\n tableau[k][l]=int(im2[k,l,1]/7)\r\n\r\nfor k in range(im4.shape[0]):\r\n for l in range(im4.shape[1]):\r\n if list(im4[k,l,:])==[0,0,255]:\r\n im4[k,l,1]=255\r\n\r\nfor k in range(20):\r\n nb_repet=5000\r\n compteur=0\r\n print(20-k)\r\n if k<0:\r\n print(\"At which coordinates do you want to place your first particle?\")\r\n print(\"(x in [0,\",im4.shape[0],\"] and y in [0,\",im4.shape[1],'])')\r\n x=int(input(\"Coordinates in x: \"))\r\n y=int(input(\"Coordinates in y: \"))\r\n if k==0:\r\n y=798\r\n x=130\r\n elif k==1:\r\n y=1247\r\n x=400\r\n else:\r\n x=rd.randrange(0,im4.shape[0]-1)\r\n y=rd.randrange(0,im4.shape[1]-1)\r\n while compteur < nb_repet:\r\n if tableau[x][y]==0:\r\n im4[x,y,0]=0\r\n im4[x,y,0]=0\r\n im4[x,y,0]=0\r\n compteur=nb_repet\r\n else:\r\n x,y=x+deplacement[tableau[x][y]][0],y+deplacement[tableau[x][y]][1]\r\n im4[x,y,0]=255\r\n im4[x,y,1]=0\r\n im4[x,y,2]=0\r\n compteur=compteur+1\r\n\r\nplt.imsave('Result.bmp',im4)\r\n \r\n", "repo_name": "lucas-mrq/TIPE", "sub_path": "3rdProgram Groix_Island/C_thirdStep_ParticleProcessing.py", "file_name": "C_thirdStep_ParticleProcessing.py", "file_ext": "py", "file_size_in_byte": 1734, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "matplotlib.pyplot.imread", "line_number": 4, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 4, "usage_type": "name"}, {"api_name": "numpy.copy", "line_number": 5, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imread", "line_number": 6, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 6, 
"usage_type": "name"}, {"api_name": "numpy.copy", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 11, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 39, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imsave", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "20676136851", "text": "#!/usr/bin/env python3\n\nimport png\nimport io\nimport os\nimport unittest\nfrom unittest.mock import Mock\nfrom unittest.mock import patch\n\nfrom pico8 import util\nfrom pico8.game import game\nfrom pico8.lua import lexer\nfrom pico8.lua import parser\n\nVALID_P8_HEADER = b'''pico-8 cartridge // http://www.pico-8.com\nversion 4\n'''\n\nINVALID_P8_HEADER_ONE = b'''INVALID HEADER\nversion 4\n'''\n\nINVALID_P8_HEADER_TWO = b'''pico-8 cartridge // http://www.pico-8.com\nINVALID HEADER\n'''\n\nVALID_P8_LUA_SECTION_HEADER = b'__lua__\\n'\n\nVALID_P8_FOOTER = (\n b'\\n__gfx__\\n' + ((b'0' * 128) + b'\\n') * 128 +\n b'__gff__\\n' + ((b'0' * 256) + b'\\n') * 2 +\n b'__map__\\n' + ((b'0' * 256) + b'\\n') * 32 +\n b'__sfx__\\n' + b'0001' + (b'0' * 164) + b'\\n' +\n (b'001' + (b'0' * 165) + b'\\n') * 63 +\n b'__music__\\n' + b'00 41424344\\n' * 64 + b'\\n\\n')\n\nCODE_UNCOMPRESSED_BYTES = bytearray([\n 102, 111, 114, 32, 105, 61, 49, 44, 49, 48, 32, 100, 111, 10, 32, 32, 112,\n 114, 105, 110, 116, 40, 34, 104, 105, 32, 34, 46, 46, 105, 41, 10, 101, 110,\n 100, 10])\n\nCODE_COMPRESSED_BYTES = bytearray([\n 58, 99, 58, 0, 0, 142, 0, 0, 0, 45, 0, 45, 2, 31, 27, 25, 17, 2, 32, 21, 32,\n 24, 17, 1, 60, 110, 13, 33, 32, 20, 27, 30, 1, 1, 18, 27, 30, 2, 21, 51, 4,\n 57, 4, 3, 2, 16, 27, 1, 2, 2, 28, 30, 21, 26, 32, 42, 0, 34, 20, 21, 2, 0,\n 34, 56, 56, 21, 43, 1, 0, 9, 2, 21, 18, 2, 21, 2, 41, 2, 6, 2, 32, 20, 17,\n 26, 61, 16, 62, 116, 14, 33, 38, 38, 0, 34, 62, 34, 61, 242, 62, 244, 0, 9,\n 2, 17, 26, 16, 1, 60, 36])\n\n# Intentional mix of tabs and spaces, don't change!\nCODE_COMPRESSED_AS_STRING = b'''-- some title\n-- some author\n\nfor i=1,10 do\n print(\"hi \"..i)\n\t if i % 3 then\n\t print(\"buzz\")\n\t print(\"buzz\")\n\t print(\"buzz\")\n\t end\nend\n'''\n\nFILE_TEST_GOL_CODE_COMPRESSED_HEAD = [\n 58, 99, 58, 0, 4, 194, 0, 0, 0, 45, 0, 45, 2, 19, 13, 25, 17, 2, 27, 18,\n 2, 24, 21, 18]\n\nTEST_PNG = {\n 'width': 3,\n 'height': 3,\n 'data': [[0xec, 0xdc, 0xcc, 0xfc, # 0\n 0xac, 0x9c, 0x8d, 0xbc, # 1\n 0x6c, 0x5c, 0x4e, 0x7c], # 2\n [0xec, 0xdc, 0xcf, 0xfc, # 3\n 0xac, 0x9d, 0x8c, 0xbc, # 4\n 0x6c, 0x5d, 0x4d, 0x7c], # 5\n [0xec, 0xdd, 0xce, 0xfc, # 6\n 0xac, 0x9d, 0x8f, 0xbc, # 7\n 0x6f, 0x5f, 0x4f, 0x7f]], # 255\n 'attrs': {'planes': 4}\n}\n\nTEST_PNG_PICODATA = [0, 1, 2, 3, 4, 5, 6, 7, 255]\n\nTEST_PNG_BLANK_DATA = [\n [0xef, 0xdf, 0xcf, 0xff,\n 0xaf, 0x9f, 0x8f, 0xbf,\n 0x6f, 0x5f, 0x4f, 0x7f],\n [0xef, 0xdf, 0xcf, 0xff,\n 0xaf, 0x9f, 0x8f, 0xbf,\n 0x6f, 0x5f, 0x4f, 0x7f],\n [0xef, 0xdf, 0xcf, 0xff,\n 0xaf, 0x9f, 0x8f, 0xbf,\n 0x6f, 0x5f, 0x4f, 0x7f]]\n\n\nclass TestP8Game(unittest.TestCase):\n def setUp(self):\n self.testdata_path = os.path.join(\n os.path.dirname(os.path.dirname(os.path.dirname(__file__))),\n 'testdata')\n\n def testFromP8File(self):\n g = game.Game.from_p8_file(io.BytesIO(\n VALID_P8_HEADER +\n VALID_P8_LUA_SECTION_HEADER +\n VALID_P8_FOOTER))\n self.assertEqual(4, g.lua._version)\n self.assertEqual(4, g.gfx._version)\n self.assertEqual(4, g.gff._version)\n 
self.assertEqual(4, g.map._version)\n self.assertEqual(4, g.sfx._version)\n self.assertEqual(4, g.music._version)\n\n def testInvalidP8HeaderErrorMsg(self):\n # coverage\n txt = str(game.InvalidP8HeaderError())\n\n def testInvalidP8SectionErrorMsg(self):\n # coverage\n txt = str(game.InvalidP8SectionError('bad'))\n\n def testInvalidP8HeaderLineOne(self):\n self.assertRaises(\n game.InvalidP8HeaderError,\n game.Game.from_p8_file,\n io.BytesIO(\n INVALID_P8_HEADER_ONE +\n VALID_P8_LUA_SECTION_HEADER +\n VALID_P8_FOOTER))\n\n def testInvalidP8HeaderLineTwo(self):\n self.assertRaises(\n game.InvalidP8HeaderError,\n game.Game.from_p8_file,\n io.BytesIO(\n INVALID_P8_HEADER_TWO +\n VALID_P8_LUA_SECTION_HEADER +\n VALID_P8_FOOTER))\n\n def testInvalidP8Section(self):\n self.assertRaises(\n game.InvalidP8SectionError,\n game.Game.from_p8_file,\n io.BytesIO(\n VALID_P8_HEADER +\n VALID_P8_LUA_SECTION_HEADER +\n b'\\n__bad__\\n\\n' +\n VALID_P8_FOOTER))\n\n def testFromP8FileGoL(self):\n p8path = os.path.join(self.testdata_path, 'test_gol.p8')\n with open(p8path, 'rb') as fh:\n p8game = game.Game.from_p8_file(fh)\n # TODO: validate game\n\n\nclass TestP8PNGGame(unittest.TestCase):\n def setUp(self):\n self.testdata_path = os.path.join(\n os.path.dirname(os.path.dirname(os.path.dirname(__file__))),\n 'testdata')\n\n def testFromP8PNGFileV0(self):\n pngpath = os.path.join(self.testdata_path, 'test_cart_memdump.p8.png')\n with open(pngpath, 'rb') as fh:\n pnggame = game.Game.from_p8png_file(fh)\n first_stat = pnggame.lua.root.stats[0]\n self.assertTrue(isinstance(first_stat, parser.StatFunctionCall))\n tokens = pnggame.lua.tokens\n self.assertEqual(lexer.TokComment(b'-- memory dump'), tokens[0])\n self.assertEqual(lexer.TokNewline(b'\\n'), tokens[1])\n self.assertEqual(lexer.TokComment(b'-- by dddaaannn'), tokens[2])\n self.assertEqual(lexer.TokNewline(b'\\n'), tokens[3])\n\n def testFromP8PNGFile(self):\n pngpath = os.path.join(self.testdata_path, 'test_gol.p8.png')\n with open(pngpath, 'rb') as fh:\n pnggame = game.Game.from_p8png_file(fh)\n # TODO: validate game\n\n def testGetCodeFromBytesUncompressed(self):\n codedata = [0] * (0x8000 - 0x4300)\n codedata[:len(CODE_UNCOMPRESSED_BYTES)] = CODE_UNCOMPRESSED_BYTES\n code_length, code, compressed_size = \\\n game.Game.get_code_from_bytes(codedata, 1)\n self.assertEqual(len(CODE_UNCOMPRESSED_BYTES), code_length)\n # (added trailing newline)\n self.assertEqual(CODE_UNCOMPRESSED_BYTES + b'\\n', code)\n self.assertIsNone(compressed_size)\n\n def testGetCodeFromBytesCompressed(self):\n codedata = [0] * (0x8000 - 0x4300)\n codedata[:len(CODE_COMPRESSED_BYTES)] = CODE_COMPRESSED_BYTES\n code_length, code, compressed_size = \\\n game.Game.get_code_from_bytes(codedata, 1)\n self.assertEqual(len(CODE_COMPRESSED_AS_STRING), code_length)\n self.assertEqual(CODE_COMPRESSED_AS_STRING, code)\n self.assertEqual(len(CODE_COMPRESSED_BYTES), compressed_size)\n\n def testPngToPicodataSimple(self):\n picodata = game.Game.get_picodata_from_pngdata(\n TEST_PNG['width'], TEST_PNG['height'],\n TEST_PNG['data'], TEST_PNG['attrs'])\n self.assertEqual(TEST_PNG_PICODATA, picodata)\n\n def testPngToPicodataFromFile(self):\n pngpath = os.path.join(self.testdata_path, 'test_gol.p8.png')\n with open(pngpath, 'rb') as fh:\n width, height, data, attrs = png.Reader(file=fh).read()\n data = list(data)\n picodata = game.Game.get_picodata_from_pngdata(\n width, height, data, attrs)\n self.assertEqual(len(picodata), 32800)\n self.assertEqual(FILE_TEST_GOL_CODE_COMPRESSED_HEAD,\n 
picodata[0x4300:\n 0x4300 + len(FILE_TEST_GOL_CODE_COMPRESSED_HEAD)])\n\n def testPicodataToPngSimple(self):\n pngdata = game.Game.get_pngdata_from_picodata(TEST_PNG_PICODATA,\n TEST_PNG_BLANK_DATA,\n TEST_PNG['attrs'])\n for row_i in range(len(pngdata)):\n self.assertEqual(bytearray(TEST_PNG['data'][row_i]), pngdata[row_i])\n\n def testCompressCodeHelloExample(self):\n test_str = (b'a=\"hello\"\\nb=\"hello also\"\\nb=\"hello also\"\\n'\n b'b=\"hello also\"\\nb=\"hello also\"\\nb=\"hello also\"\\n'\n b'b=\"hello also\"\\n\\n')\n comp_result = game.Game.compress_code(test_str)\n code_length_bytes = bytes([len(test_str) >> 8, len(test_str) & 255])\n code_bytes = b''.join([b':c:\\0', code_length_bytes, b'\\0\\0',\n comp_result])\n decomp_result = game.Game.decompress_code(code_bytes)\n self.assertEqual(decomp_result[1], test_str)\n\n p8_comp_result = bytearray([\n 13, 51, 0, 34, 20, 17, 24, 24, 27, 0, 34, 1, 14, 60, 90, 2, 13, 24,\n 31, 60, 223, 61, 254, 62, 253, 63, 252, 64, 171, 1])\n self.assertEqual(len(comp_result), len(p8_comp_result))\n self.assertEqual(comp_result, p8_comp_result)\n\n\nclass TestGameToP8(unittest.TestCase):\n def setUp(self):\n self.testdata_path = os.path.join(\n os.path.dirname(os.path.dirname(os.path.dirname(__file__))),\n 'testdata')\n self.orig_error_stream = util._error_stream\n util._error_stream = io.StringIO()\n\n def tearDown(self):\n util._error_stream = self.orig_error_stream\n\n def testToP8FileFromP8(self):\n with open(os.path.join(self.testdata_path, 'test_cart.p8'), 'rb') as fh:\n orig_game = game.Game.from_p8_file(fh)\n with open(os.path.join(self.testdata_path, 'test_cart.p8'), 'rb') as fh:\n expected_game_p8 = fh.read()\n outstr = io.BytesIO()\n orig_game.to_p8_file(outstr)\n self.assertEqual(expected_game_p8, outstr.getvalue())\n\n def testToP8FileFromP8PreservesLabel(self):\n with open(os.path.join(self.testdata_path, 'test_cart_with_label.p8'), 'rb') as fh:\n orig_game = game.Game.from_p8_file(fh)\n with open(os.path.join(self.testdata_path, 'test_cart_with_label.p8'), 'rb') as fh:\n expected_game_p8 = fh.read()\n outstr = io.BytesIO()\n orig_game.to_p8_file(outstr)\n self.assertEqual(expected_game_p8, outstr.getvalue())\n\n def testToP8FileFromPng(self):\n with open(os.path.join(self.testdata_path, 'test_cart.p8.png'),\n 'rb') as fh:\n orig_game = game.Game.from_p8png_file(fh)\n with open(os.path.join(self.testdata_path, 'test_cart.p8'), 'rb') as fh:\n expected_game_p8 = fh.read()\n outstr = io.BytesIO()\n orig_game.to_p8_file(outstr)\n self.assertEqual(expected_game_p8, outstr.getvalue())\n\n def testCharCountWarning(self):\n g = game.Game.make_empty_game(filename='test')\n g.lua.update_from_lines(\n [b'-- 345678901234567890123456789012345678\\n'] * 820)\n outstr = io.BytesIO()\n g.to_p8_file(outstr, filename='test')\n self.assertTrue(util._error_stream.getvalue().startswith(\n 'test: warning: character count'))\n\n def testTokenCountWarning(self):\n g = game.Game.make_empty_game()\n g.lua.update_from_lines(\n [b'a=b=c=d=e=f=g=h=i=j=k=l=m=n=o=p=q=r=s=t=u\\n'] * 199 +\n [b'a=b=c=d=e=f=g=h=i=j=k=l=m=n=o=p=q=r=s=t=u'])\n outstr = io.BytesIO()\n g.to_p8_file(outstr)\n self.assertTrue(util._error_stream.getvalue().startswith(\n 'warning: token count'))\n\n\nclass TestGameToP8PNG(unittest.TestCase):\n def setUp(self):\n self.testdata_path = os.path.join(\n os.path.dirname(os.path.dirname(os.path.dirname(__file__))),\n 'testdata')\n self.orig_error_stream = util._error_stream\n util._error_stream = io.StringIO()\n\n def tearDown(self):\n 
util._error_stream = self.orig_error_stream\n\n # TODO:\n # def testToPngFromPng(self):\n # with open(os.path.join(self.testdata_path, 'test_cart.p8.png'),\n # 'rb') as fh:\n # orig_game = game.Game.from_p8png_file(fh)\n # with open(os.path.join(self.testdata_path, 'test_cart.p8.png'),\n # 'rb') as fh:\n # expected_game_p8 = fh.read()\n # outstr = io.BytesIO()\n # orig_game.to_p8png_file(\n # outstr,\n # label_fname=os.path.join(self.testdata_path,\n # 'test_cart.p8.png'))\n # self.assertEqual(expected_game_p8, outstr.getvalue())\n\n\nif __name__ == '__main__':\n unittest.main()\n", "repo_name": "Kacperek2137/BallzArt", "sub_path": "tests/pico8/game/game_test.py", "file_name": "game_test.py", "file_ext": "py", "file_size_in_byte": 12394, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "33", "api": [{"api_name": "unittest.TestCase", "line_number": 97, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path", "line_number": 100, "usage_type": "attribute"}, {"api_name": "pico8.game.game.Game.from_p8_file", "line_number": 104, "usage_type": "call"}, {"api_name": "pico8.game.game.Game", "line_number": 104, "usage_type": "attribute"}, {"api_name": "pico8.game.game", "line_number": 104, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 104, "usage_type": "call"}, {"api_name": "pico8.game.game.InvalidP8HeaderError", "line_number": 117, "usage_type": "call"}, {"api_name": "pico8.game.game", "line_number": 117, "usage_type": "name"}, {"api_name": "pico8.game.game.InvalidP8SectionError", "line_number": 121, "usage_type": "call"}, {"api_name": "pico8.game.game", "line_number": 121, "usage_type": "name"}, {"api_name": "pico8.game.game.InvalidP8HeaderError", "line_number": 125, "usage_type": "attribute"}, {"api_name": "pico8.game.game", "line_number": 125, "usage_type": "name"}, {"api_name": "pico8.game.game.Game", "line_number": 126, "usage_type": "attribute"}, {"api_name": "pico8.game.game", "line_number": 126, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 127, "usage_type": "call"}, {"api_name": "pico8.game.game.InvalidP8HeaderError", "line_number": 134, "usage_type": "attribute"}, {"api_name": "pico8.game.game", "line_number": 134, "usage_type": "name"}, {"api_name": "pico8.game.game.Game", "line_number": 135, "usage_type": "attribute"}, {"api_name": "pico8.game.game", "line_number": 135, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 136, "usage_type": "call"}, {"api_name": "pico8.game.game.InvalidP8SectionError", "line_number": 143, "usage_type": "attribute"}, {"api_name": "pico8.game.game", "line_number": 143, "usage_type": "name"}, {"api_name": "pico8.game.game.Game", "line_number": 144, "usage_type": "attribute"}, {"api_name": "pico8.game.game", "line_number": 144, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path", "line_number": 152, "usage_type": "attribute"}, {"api_name": "pico8.game.game.Game.from_p8_file", "line_number": 154, "usage_type": "call"}, {"api_name": "pico8.game.game.Game", "line_number": 154, "usage_type": "attribute"}, {"api_name": "pico8.game.game", "line_number": 154, "usage_type": "name"}, {"api_name": "unittest.TestCase", 
"line_number": 158, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path", "line_number": 160, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 161, "usage_type": "call"}, {"api_name": "os.path", "line_number": 161, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 165, "usage_type": "call"}, {"api_name": "os.path", "line_number": 165, "usage_type": "attribute"}, {"api_name": "pico8.game.game.Game.from_p8png_file", "line_number": 167, "usage_type": "call"}, {"api_name": "pico8.game.game.Game", "line_number": 167, "usage_type": "attribute"}, {"api_name": "pico8.game.game", "line_number": 167, "usage_type": "name"}, {"api_name": "pico8.lua.parser.StatFunctionCall", "line_number": 169, "usage_type": "attribute"}, {"api_name": "pico8.lua.parser", "line_number": 169, "usage_type": "name"}, {"api_name": "pico8.lua.lexer.TokComment", "line_number": 171, "usage_type": "call"}, {"api_name": "pico8.lua.lexer", "line_number": 171, "usage_type": "name"}, {"api_name": "pico8.lua.lexer.TokNewline", "line_number": 172, "usage_type": "call"}, {"api_name": "pico8.lua.lexer", "line_number": 172, "usage_type": "name"}, {"api_name": "pico8.lua.lexer.TokComment", "line_number": 173, "usage_type": "call"}, {"api_name": "pico8.lua.lexer", "line_number": 173, "usage_type": "name"}, {"api_name": "pico8.lua.lexer.TokNewline", "line_number": 174, "usage_type": "call"}, {"api_name": "pico8.lua.lexer", "line_number": 174, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 177, "usage_type": "call"}, {"api_name": "os.path", "line_number": 177, "usage_type": "attribute"}, {"api_name": "pico8.game.game.Game.from_p8png_file", "line_number": 179, "usage_type": "call"}, {"api_name": "pico8.game.game.Game", "line_number": 179, "usage_type": "attribute"}, {"api_name": "pico8.game.game", "line_number": 179, "usage_type": "name"}, {"api_name": "pico8.game.game.Game.get_code_from_bytes", "line_number": 186, "usage_type": "call"}, {"api_name": "pico8.game.game.Game", "line_number": 186, "usage_type": "attribute"}, {"api_name": "pico8.game.game", "line_number": 186, "usage_type": "name"}, {"api_name": "pico8.game.game.Game.get_code_from_bytes", "line_number": 196, "usage_type": "call"}, {"api_name": "pico8.game.game.Game", "line_number": 196, "usage_type": "attribute"}, {"api_name": "pico8.game.game", "line_number": 196, "usage_type": "name"}, {"api_name": "pico8.game.game.Game.get_picodata_from_pngdata", "line_number": 202, "usage_type": "call"}, {"api_name": "pico8.game.game.Game", "line_number": 202, "usage_type": "attribute"}, {"api_name": "pico8.game.game", "line_number": 202, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 208, "usage_type": "call"}, {"api_name": "os.path", "line_number": 208, "usage_type": "attribute"}, {"api_name": "png.Reader", "line_number": 210, "usage_type": "call"}, {"api_name": "pico8.game.game.Game.get_picodata_from_pngdata", "line_number": 212, "usage_type": "call"}, {"api_name": "pico8.game.game.Game", "line_number": 212, "usage_type": "attribute"}, {"api_name": "pico8.game.game", "line_number": 212, "usage_type": "name"}, {"api_name": "pico8.game.game.Game.get_pngdata_from_picodata", "line_number": 220, "usage_type": "call"}, {"api_name": "pico8.game.game.Game", "line_number": 220, "usage_type": "attribute"}, {"api_name": "pico8.game.game", "line_number": 220, "usage_type": "name"}, {"api_name": "pico8.game.game.Game.compress_code", "line_number": 
230, "usage_type": "call"}, {"api_name": "pico8.game.game.Game", "line_number": 230, "usage_type": "attribute"}, {"api_name": "pico8.game.game", "line_number": 230, "usage_type": "name"}, {"api_name": "pico8.game.game.Game.decompress_code", "line_number": 234, "usage_type": "call"}, {"api_name": "pico8.game.game.Game", "line_number": 234, "usage_type": "attribute"}, {"api_name": "pico8.game.game", "line_number": 234, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 244, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 246, "usage_type": "call"}, {"api_name": "os.path", "line_number": 246, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 247, "usage_type": "call"}, {"api_name": "os.path", "line_number": 247, "usage_type": "attribute"}, {"api_name": "pico8.util._error_stream", "line_number": 249, "usage_type": "attribute"}, {"api_name": "pico8.util", "line_number": 249, "usage_type": "name"}, {"api_name": "pico8.util._error_stream", "line_number": 250, "usage_type": "attribute"}, {"api_name": "pico8.util", "line_number": 250, "usage_type": "name"}, {"api_name": "io.StringIO", "line_number": 250, "usage_type": "call"}, {"api_name": "pico8.util._error_stream", "line_number": 253, "usage_type": "attribute"}, {"api_name": "pico8.util", "line_number": 253, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 256, "usage_type": "call"}, {"api_name": "os.path", "line_number": 256, "usage_type": "attribute"}, {"api_name": "pico8.game.game.Game.from_p8_file", "line_number": 257, "usage_type": "call"}, {"api_name": "pico8.game.game.Game", "line_number": 257, "usage_type": "attribute"}, {"api_name": "pico8.game.game", "line_number": 257, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 258, "usage_type": "call"}, {"api_name": "os.path", "line_number": 258, "usage_type": "attribute"}, {"api_name": "io.BytesIO", "line_number": 260, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 265, "usage_type": "call"}, {"api_name": "os.path", "line_number": 265, "usage_type": "attribute"}, {"api_name": "pico8.game.game.Game.from_p8_file", "line_number": 266, "usage_type": "call"}, {"api_name": "pico8.game.game.Game", "line_number": 266, "usage_type": "attribute"}, {"api_name": "pico8.game.game", "line_number": 266, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 267, "usage_type": "call"}, {"api_name": "os.path", "line_number": 267, "usage_type": "attribute"}, {"api_name": "io.BytesIO", "line_number": 269, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 274, "usage_type": "call"}, {"api_name": "os.path", "line_number": 274, "usage_type": "attribute"}, {"api_name": "pico8.game.game.Game.from_p8png_file", "line_number": 276, "usage_type": "call"}, {"api_name": "pico8.game.game.Game", "line_number": 276, "usage_type": "attribute"}, {"api_name": "pico8.game.game", "line_number": 276, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 277, "usage_type": "call"}, {"api_name": "os.path", "line_number": 277, "usage_type": "attribute"}, {"api_name": "io.BytesIO", "line_number": 279, "usage_type": "call"}, {"api_name": "pico8.game.game.Game.make_empty_game", "line_number": 284, "usage_type": "call"}, {"api_name": "pico8.game.game.Game", "line_number": 284, "usage_type": "attribute"}, {"api_name": "pico8.game.game", "line_number": 284, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 287, "usage_type": "call"}, {"api_name": 
"pico8.util._error_stream.getvalue", "line_number": 289, "usage_type": "call"}, {"api_name": "pico8.util._error_stream", "line_number": 289, "usage_type": "attribute"}, {"api_name": "pico8.util", "line_number": 289, "usage_type": "name"}, {"api_name": "pico8.game.game.Game.make_empty_game", "line_number": 293, "usage_type": "call"}, {"api_name": "pico8.game.game.Game", "line_number": 293, "usage_type": "attribute"}, {"api_name": "pico8.game.game", "line_number": 293, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 297, "usage_type": "call"}, {"api_name": "pico8.util._error_stream.getvalue", "line_number": 299, "usage_type": "call"}, {"api_name": "pico8.util._error_stream", "line_number": 299, "usage_type": "attribute"}, {"api_name": "pico8.util", "line_number": 299, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 303, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 305, "usage_type": "call"}, {"api_name": "os.path", "line_number": 305, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 306, "usage_type": "call"}, {"api_name": "os.path", "line_number": 306, "usage_type": "attribute"}, {"api_name": "pico8.util._error_stream", "line_number": 308, "usage_type": "attribute"}, {"api_name": "pico8.util", "line_number": 308, "usage_type": "name"}, {"api_name": "pico8.util._error_stream", "line_number": 309, "usage_type": "attribute"}, {"api_name": "pico8.util", "line_number": 309, "usage_type": "name"}, {"api_name": "io.StringIO", "line_number": 309, "usage_type": "call"}, {"api_name": "pico8.util._error_stream", "line_number": 312, "usage_type": "attribute"}, {"api_name": "pico8.util", "line_number": 312, "usage_type": "name"}, {"api_name": "unittest.main", "line_number": 331, "usage_type": "call"}]} +{"seq_id": "39622196892", "text": "import re\nimport socks\nimport time\nimport config\nimport requests\nimport mysql.connector\nfrom telethon.tl.functions.messages import GetHistoryRequest\nfrom telethon.tl.functions.contacts import ResolveUsernameRequest\nfrom telethon.tl.functions.channels import GetMessagesRequest\nfrom telethon.tl.functions.messages import GetHistoryRequest, ReadHistoryRequest\nfrom telethon import TelegramClient, events, sync\nimport telethon.sync\nfrom telethon.tl.types import PeerUser, PeerChat, PeerChannel\nimport telebot\nimport sqlite3\n\napi_id = 988074\napi_hash = 'a5ec8b7b6dbeedc2514ca7e4ba200c13'\n\n\nclient = TelegramClient('btc_pay', api_id, api_hash)\nclient.start()\nbot_token = config.token\n\nfrom connect import connect\ndef get_user_infa(userid):\n\tanswer = telebot.TeleBot(config.token).get_chat(userid)\n\treturn f'{answer.first_name} ({userid})'\n\n\ndef main():\n\tconnection,q = connect()\n\tglobal i\n\tq.execute(f\"SELECT * FROM ugc_buys where id != 'del' and wallet = 'banker'\")\n\tinfo = q.fetchall()\n\tinfoo = info\n\tfor i in infoo:\n\t\tif i != None and i[1] != 'del':\n\t\t\tprint('NEW_CHECK')\n\t\t\ttime.sleep(1)\n\t\t\tclient.send_message('BTC_CHANGE_BOT', f'/start {i[0]}')\n\t\t\ttime.sleep(5)\n\t\t\tanswer = check()\n\t\t\tif 'Вы получили' in str(answer) and 'RUB' in str(answer):\n\t\t\t\tsumma_plus_balance = str(answer).split('BTC (')[1].split(' RUB')[0].replace(',','.').replace(' ','')\n\t\t\t\tq.execute(f\"update ugc_users set balance = balance + '{summa_plus_balance}' where userid = '{i[1]}'\")\n\t\t\t\tconnection.commit()\n\t\t\t\tq.execute(f\"update ugc_buys set summa = '{summa_plus_balance}' where id = 
'{i[0]}'\")\n\t\t\t\tconnection.commit()\n\t\t\t\tq.execute(f\"update ugc_buys set id = 'del' where id = '{i[0]}'\")\n\t\t\t\tconnection.commit()\n\t\t\t\ttry:\n\t\t\t\t\ttelebot.TeleBot(config.token).send_message(i[1],f'''🔥 Баланс пополнен на {summa_plus_balance}₽''',parse_mode='HTML')\n\t\t\t\t\ttelebot.TeleBot(config.token).send_message('-763339921', f'''Пользователь: {get_user_infa(i[1])}\\nПополнил баланс на {summa_plus_balance}₽\\nСистема: BTC Banker''',parse_mode='HTML')\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\telif 'Упс, кажется, данный чек успел обналичить кто-то другой 😟' in str(answer):\n\t\t\t\ttry:\n\t\t\t\t\tbot = telebot.TeleBot(bot_token).send_message(i[1], 'Ошибка в чеке')\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\t\tq.execute(f'DELETE FROM ugc_buys WHERE id = \"{i[0]}\"')\n\t\t\t\tconnection.commit()\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tbot = telebot.TeleBot(bot_token).send_message(i[1], 'Ошибка в чеке')\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\t\tq.execute(f'DELETE FROM ugc_buys WHERE id = \"{i[0]}\"')\n\t\t\t\tconnection.commit()\n\ndef check():\n\tchannel_username='BTC_CHANGE_BOT'\n\tchannel_entity=client.get_entity(channel_username)\n\tposts = client(GetHistoryRequest(peer=channel_entity,limit=1,offset_date=None,offset_id=0,max_id=0,min_id=0,add_offset=0,hash=0))\n\tmesages = posts.messages\n\tfor i in mesages:\n\t\tanswer = i.message\n\t\treturn answer\n\nwhile True:\n\ttime.sleep(2)\n\tmain()\n\nclient.run_until_disconnected()", "repo_name": "AKAKWKWKWKW/HZNEWQMK", "sub_path": "btc_pay.py", "file_name": "btc_pay.py", "file_ext": "py", "file_size_in_byte": 3064, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "telethon.TelegramClient", "line_number": 21, "usage_type": "call"}, {"api_name": "config.token", "line_number": 23, "usage_type": "attribute"}, {"api_name": "telebot.TeleBot", "line_number": 27, "usage_type": "call"}, {"api_name": "config.token", "line_number": 27, "usage_type": "attribute"}, {"api_name": "connect.connect", "line_number": 32, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 40, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 42, "usage_type": "call"}, {"api_name": "telebot.TeleBot", "line_number": 53, "usage_type": "call"}, {"api_name": "config.token", "line_number": 53, "usage_type": "attribute"}, {"api_name": "telebot.TeleBot", "line_number": 54, "usage_type": "call"}, {"api_name": "config.token", "line_number": 54, "usage_type": "attribute"}, {"api_name": "telebot.TeleBot", "line_number": 59, "usage_type": "call"}, {"api_name": "telebot.TeleBot", "line_number": 66, "usage_type": "call"}, {"api_name": "telethon.tl.functions.messages.GetHistoryRequest", "line_number": 75, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 82, "usage_type": "call"}]} +{"seq_id": "32214749691", "text": "import boto3\nimport json\nfrom lambdaflashcards import LambdaFlashcards\nimport urllib\nimport time\nimport uuid\nfrom scraper import Scraper\nfrom pipelines_sagemaker import QGSagemaker, QASagemaker\n\ndef generate(event, context):\n src = event[\"queryStringParameters\"][\"src\"]\n src = urllib.parse.unquote(src).split(\"=\")[-1]\n print(f\"FUNCTION generate IN main.py CALLED WITH ARGUMENT {src}\")\n flashcards = LambdaFlashcards()\n print(\"LambdaFlashcards OBJECT INSTANTIATED\")\n questions = flashcards(src)\n r = {str(uuid.uuid4()) : questions}\n response = {\n \"isBase64Encoded\":False,\n \"statusCode\": 200,\n \"headers\":{\n 
\"Content-Type\":\"application/json\",\n \"Access-Control-Allow-Origin\":\"*\",\n },\n \"body\": json.dumps(r)\n }\n\n print(r)\n return response\n\ndef segment_text(event, context):\n batch_size=50\n src = event[\"queryStringParameters\"][\"src\"]\n src = urllib.parse.unquote(src).split(\"=\")[-1]\n scraper = Scraper()\n context=scraper.get_text(src)\n tokens = context.split()\n question_batches = [' '.join(tokens[i:i+batch_size]) for i in range(0, len(tokens), batch_size)]\n ans_batches = {}\n for i, e in enumerate(question_batches):\n if i == 0:\n ans_batches[i] = \" \".join(question_batches[i:i+2])\n else:\n ans_batches[i] = \" \".join(question_batches[i-1:i+2])\n\n r = {str(uuid.uuid4()): ans_batches}\n response = {\n \"isBase64Encoded\":False,\n \"statusCode\":200,\n \"headers\":{\n \"Content-Type\":\"application/json\",\n \"Access-Control-Allow-Origin\":\"*\",\n },\n \"body\":json.dumps(r)\n }\n\n print(r)\n return response\n\ndef generate_single(event, context):\n def filter(question):\n return (\n len(question.split()) <= 5 or\n question.split()[0].lower() in [\"when\", \"who\"] or\n question[-1].lower() != \"?\" or\n not [question_word in question.lower() for question_word in [\"what\", \"where\", \"how\"]]\n )\n\n context = event[\"ctx\"]\n context_list=context.split(\" \")\n if len(context_list) <= 100:\n qg_context = \" \".join(context_list[:50])\n else:\n qg_context = \" \".join(context_list[50:100])\n\n now = time.time()\n qg_model = QGSagemaker()\n qa_model = QASagemaker()\n print(f\"Time to initialize models: {now-time.time()}s\")\n now = time.time()\n question = qg_model(qg_context)[0]\n print(f\"Time to generate question: {now-time.time()}s\")\n if not filter(question):\n now = time.time()\n payload = {\n \"question\": question,\n \"answer\": qa_model(question, context),\n \"context\": context,\n }\n print(f\"Time to generate answer: {now-time.time()}s\")\n else:\n payload = {\n \"question\": None,\n \"answer\": None,\n \"context\": context,\n }\n\n r = { str(uuid.uuid4()) : payload }\n response = {\n \"isBase64Encoded\":False,\n \"statusCode\":200,\n \"headers\":{\n \"Content-Type\":\"application/json\",\n \"Access-Control-Allow-Origin\":\"*\",\n },\n \"body\":json.dumps(r)\n }\n\n return response\n", "repo_name": "kanyesthaker/qgqa-flashcards", "sub_path": "src/aws/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3201, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 64, "dataset": "github-code", "pt": "42", "api": [{"api_name": "urllib.parse.unquote", "line_number": 12, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 12, "usage_type": "attribute"}, {"api_name": "lambdaflashcards.LambdaFlashcards", "line_number": 14, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 17, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 25, "usage_type": "call"}, {"api_name": "urllib.parse.unquote", "line_number": 34, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 34, "usage_type": "attribute"}, {"api_name": "scraper.Scraper", "line_number": 35, "usage_type": "call"}, {"api_name": "scraper.get_text", "line_number": 36, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 46, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 54, "usage_type": "call"}, {"api_name": "time.time", "line_number": 76, "usage_type": "call"}, {"api_name": "pipelines_sagemaker.QGSagemaker", "line_number": 77, "usage_type": "call"}, {"api_name": 
"pipelines_sagemaker.QASagemaker", "line_number": 78, "usage_type": "call"}, {"api_name": "time.time", "line_number": 79, "usage_type": "call"}, {"api_name": "time.time", "line_number": 80, "usage_type": "call"}, {"api_name": "time.time", "line_number": 82, "usage_type": "call"}, {"api_name": "time.time", "line_number": 84, "usage_type": "call"}, {"api_name": "time.time", "line_number": 90, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 98, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 106, "usage_type": "call"}]} +{"seq_id": "75117568767", "text": "from stellar_sdk import Keypair, Server, TransactionBuilder, Network\nfrom os import listdir\n\n# initialization code to connect to Stellar network and load account to send XLM from\nserver = Server(horizon_url=\"https://horizon.stellar.org\")\n\nkeypair = Keypair.from_secret('YOUR SECRET KEY HERE') # You need to put your secret key here to send from your account\n\nsenderAccount = server.load_account(account_id=keypair.public_key)\n\n\ndef sendXLM(destinationAddress: str, amount: float):\n # Function to create a single transaction to send XLM to an address\n # \n # destinationAddress: str - the public address to send XLM to\n # amount: float - the amount of XLM to send\n\n transaction = TransactionBuilder(\n source_account=senderAccount, network_passphrase=Network.PUBLIC_NETWORK_PASSPHRASE, base_fee=100\n ).add_text_memo(\"Your Memo Here\").append_payment_op(\n destinationAddress, amount=str(amount), asset_code=\"XLM\"\n ).set_timeout(30).build()\n\n transaction.sign(keypair)\n response = server.submit_transaction(transaction)\n\n return response\n\n\ndef readAddresses(filename: str):\n # Function to read addresses from a text file and return them as a list\n #\n # filename: str - the name of the text file where the addresses are stored; must be in the same folder as this script\n # example: \"addresses.txt\"\n\n result = []\n with open(filename, 'r') as f:\n for line in f:\n if line.strip() != '':\n result.append(line.strip())\n \n return result\n\n\ndef main():\n \n filename = \"addresses.txt\" # change this if you want to call the file something else\n amountXLM = 0.01 # how much XLM you want to send to each address; change this if you want\n\n if filename in listdir():\n addresses = readAddresses(filename)\n\n for address in addresses:\n sendXLM(address, amountXLM)\n print(str(amountXLM) + ' XLM sent to ' + address)\n\n print(\"Payments sent successfully.\")\n\n else:\n print('Error: File \"' + filename + '\" not found.')\n\n\nif __name__ == '__main__':\n main()\n\n", "repo_name": "alexmulligan/file_download", "sub_path": "XLM-Sender.py", "file_name": "XLM-Sender.py", "file_ext": "py", "file_size_in_byte": 2114, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "42", "api": [{"api_name": "stellar_sdk.Server", "line_number": 5, "usage_type": "call"}, {"api_name": "stellar_sdk.Keypair.from_secret", "line_number": 7, "usage_type": "call"}, {"api_name": "stellar_sdk.Keypair", "line_number": 7, "usage_type": "name"}, {"api_name": "stellar_sdk.TransactionBuilder", "line_number": 18, "usage_type": "call"}, {"api_name": "stellar_sdk.Network.PUBLIC_NETWORK_PASSPHRASE", "line_number": 19, "usage_type": "attribute"}, {"api_name": "stellar_sdk.Network", "line_number": 19, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "20021417928", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n`Maze` is the top layer object 
on which we run the algorithms.\n\n`GIFSurface` is the bottom layer object that handles the information\nabout the output GIF image.\n\n`Animation` is the middle layer object that controls how\na `Maze` object is rendered to a `GIFSurface` object.\n\"\"\"\nfrom io import BytesIO\nfrom functools import partial\nfrom PIL import Image\nimport encoder\n\n\nclass Maze(object):\n \"\"\"\n This class defines the basic structure of a maze and some operations on it.\n A maze is represented by a grid with `height` rows and `width` columns,\n each cell in the maze has 4 possible states:\n 0: it's a wall\n 1: it's in the tree\n 2: it's in the path\n 3: it's filled (this will not be used until the maze-searching animation)\n Initially all cells are walls.\n Adjacent cells in the maze are spaced out by one cell.\n \"\"\"\n\n WALL = 0\n TREE = 1\n PATH = 2\n FILL = 3\n\n def __init__(self, width, height, mask):\n \"\"\"\n Parameters\n ----------\n width, height: size of the maze, must both be odd integers.\n\n mask: `None` or an file-like image or an instance of PIL's Image class.\n If not `None` then this mask image must be of binary type:\n the black pixels are considered as `walls` and are overlayed\n on top of the grid graph. Note the walls must preserve the\n connectivity of the grid graph, otherwise the program will\n not terminate.\n \"\"\"\n if (width * height % 2 == 0):\n raise ValueError('The width and height must both be odd integers.')\n\n self.width = width\n self.height = height\n self._grid = [[0] * height for _ in range(width)]\n self._num_changes = 0 # a counter holds how many cells are changed.\n self._frame_box = None # a 4-tuple maintains the region that to be updated.\n\n if mask is not None:\n if isinstance(mask, Image.Image):\n mask = mask.convert('L').resize((width, height))\n else:\n mask = Image.open(mask).convert('L').resize((width, height))\n\n def get_mask_pixel(cell):\n return mask is None or mask.getpixel(cell) == 255\n\n self.cells = []\n for y in range(0, height, 2):\n for x in range(0, width, 2):\n if get_mask_pixel((x, y)):\n self.cells.append((x, y))\n\n def neighborhood(cell):\n x, y = cell\n neighbors = []\n if x >= 2 and get_mask_pixel((x - 2, y)):\n neighbors.append((x - 2, y))\n if y >= 2 and get_mask_pixel((x, y - 2)):\n neighbors.append((x, y - 2))\n if x <= width - 3 and get_mask_pixel((x + 2, y)):\n neighbors.append((x + 2, y))\n if y <= height - 3 and get_mask_pixel((x, y + 2)):\n neighbors.append((x, y + 2))\n return neighbors\n\n self._graph = {v: neighborhood(v) for v in self.cells}\n self.scaling = 1\n self.translation = (0, 0)\n\n def get_neighbors(self, cell):\n return self._graph[cell]\n\n def mark_cell(self, cell, value):\n \"\"\"Mark a cell and update `frame_box` and `num_changes`.\"\"\"\n x, y = cell\n self._grid[x][y] = value\n self._num_changes += 1\n\n if self._frame_box is not None:\n left, top, right, bottom = self._frame_box\n self._frame_box = (min(x, left), min(y, top),\n max(x, right), max(y, bottom))\n else:\n self._frame_box = (x, y, x, y)\n\n def mark_space(self, c1, c2, value):\n \"\"\"Mark the space between two adjacent cells.\"\"\"\n c = ((c1[0] + c2[0]) // 2, (c1[1] + c2[1]) // 2)\n self.mark_cell(c, value)\n\n def mark_path(self, path, value):\n \"\"\"Mark the cells in a path and the spaces between them.\"\"\"\n for cell in path:\n self.mark_cell(cell, value)\n for c1, c2 in zip(path[1:], path[:-1]):\n self.mark_space(c1, c2, value)\n\n def get_cell(self, cell):\n x, y = cell\n return self._grid[x][y]\n\n def barrier(self, c1, 
c2):\n \"\"\"Check if two adjacent cells are connected.\"\"\"\n x = (c1[0] + c2[0]) // 2\n y = (c1[1] + c2[1]) // 2\n return self._grid[x][y] == Maze.WALL\n\n def is_wall(self, cell):\n x, y = cell\n return self._grid[x][y] == Maze.WALL\n\n def in_tree(self, cell):\n x, y = cell\n return self._grid[x][y] == Maze.TREE\n\n def in_path(self, cell):\n x, y = cell\n return self._grid[x][y] == Maze.PATH\n\n def reset(self):\n self._num_changes = 0\n self._frame_box = None\n\n @property\n def frame_box(self):\n return self._frame_box\n\n @property\n def num_changes(self):\n return self._num_changes\n\n def scale(self, c):\n self.scaling = c\n return self\n\n def translate(self, v):\n self.translation = v\n return self\n\n\nclass GIFSurface(object):\n \"\"\"\n A GIFSurface is an object on which the animations are drawn,\n and which can be saved as GIF images.\n Each instance opens a BytesIO file in memory once it's created.\n The frames are temporarily written to this in-memory file for speed.\n When the animation is finished one should call the `close()` method\n to close the io.\n \"\"\"\n def __init__(self, width, height, loop=0, bg_color=None):\n \"\"\"\n ----------\n Parameters\n\n width, height: size of the image in pixels.\n\n loop: number of loops of the image.\n\n bg_color: background color index.\n \"\"\"\n self.width = width\n self.height = height\n self.loop = loop\n self.palette = None\n self._io = BytesIO()\n\n if bg_color is not None:\n self.write(encoder.rectangle(0, 0, width, height, bg_color))\n\n @classmethod\n def from_image(cls, img_file, loop=0):\n \"\"\"\n Create a surface from a given image file.\n The size of the returned surface is the same with the image's.\n The image is then painted as the background.\n \"\"\"\n # the image file usually contains more than 256 colors\n # so we need to convert it to gif format first.\n with BytesIO() as temp_io:\n Image.open(img_file).convert('RGB').save(temp_io, format='gif')\n img = Image.open(temp_io).convert('RGB')\n surface = cls(img.size[0], img.size[1], loop=loop)\n surface.write(encoder.parse_image(img))\n return surface\n\n def write(self, data):\n self._io.write(data)\n\n def set_palette(self, palette):\n \"\"\"\n Set the global color table of the GIF image.\n The user must specify at least one rgb color in it.\n `palette` must be a 1-d list of integers between 0-255.\n \"\"\"\n try:\n palette = bytearray(palette)\n except:\n raise ValueError('A 1-d list of integers in range 0-255 is expected.')\n\n if len(palette) < 3:\n raise ValueError('At least one (r, g, b) triple is required.')\n\n nbits = (len(palette) // 3).bit_length() - 1\n nbits = min(max(nbits, 1), 8)\n valid_len = 3 * (1 << nbits)\n if len(palette) > valid_len:\n palette = palette[:valid_len]\n else:\n palette.extend([0] * (valid_len - len(palette)))\n\n self.palette = palette\n\n @property\n def _gif_header(self):\n \"\"\"\n Get the `logical screen descriptor`, `global color table`\n and `loop control block`.\n \"\"\"\n if self.palette is None:\n raise ValueError('Missing global color table.')\n\n color_depth = (len(self.palette) // 3).bit_length() - 1\n screen = encoder.screen_descriptor(self.width, self.height, color_depth)\n loop = encoder.loop_control_block(self.loop)\n return screen + self.palette + loop\n\n def save(self, filename):\n \"\"\"\n Save the animation to a .gif file, note the 'wb' mode here!\n \"\"\"\n with open(filename, 'wb') as f:\n f.write(self._gif_header)\n f.write(self._io.getvalue())\n f.write(bytearray([0x3B]))\n\n def close(self):\n 
self._io.close()\n\n\nclass Render(object):\n \"\"\"\n This class encodes the region specified by the `frame_box` attribute of a maze\n into one frame in the GIF image.\n \"\"\"\n def __init__(self, cmap, mcl):\n \"\"\"\n cmap: a dict that maps the value of the cells to their color indices.\n\n mcl: the minimum code length for the LZW compression.\n\n A default dict is initialized so that one can set the colormap by\n just specifying what needs to be specified.\n \"\"\"\n self.colormap = {i: i for i in range(1 << mcl)}\n if cmap:\n self.colormap.update(cmap)\n self.compress = partial(encoder.lzw_compress, mcl=mcl)\n\n def __call__(self, maze):\n \"\"\"\n Encode current maze into one frame and return the encoded data.\n Note the graphics control block is not added here.\n \"\"\"\n # the image descriptor\n if maze.frame_box is not None:\n left, top, right, bottom = maze.frame_box\n else:\n left, top, right, bottom = 0, 0, maze.width - 1, maze.height - 1\n\n width = right - left + 1\n height = bottom - top + 1\n descriptor = encoder.image_descriptor(maze.scaling * left + maze.translation[0],\n maze.scaling * top + maze.translation[1],\n maze.scaling * width,\n maze.scaling * height)\n\n pixels = [self.colormap[maze.get_cell((x // maze.scaling + left,\n y // maze.scaling + top))]\n for y in range(height * maze.scaling)\n for x in range(width * maze.scaling)]\n\n # the compressed image data of this frame\n data = self.compress(pixels)\n # clear `num_changes` and `frame_box`\n maze.reset()\n\n return descriptor + data\n\n\nclass Animation(object):\n \"\"\"\n This class is the main entrance for calling algorithms to\n run and rendering the maze into the image.\n \"\"\"\n\n def __init__(self, surface):\n self._gif_surface = surface\n\n def pause(self, delay, trans_index=0):\n \"\"\"Pause the animation by padding a 1x1 invisible frame.\"\"\"\n self._gif_surface.write(encoder.pause(delay, trans_index))\n\n def paint(self, *args):\n \"\"\"Paint a rectangular region in the surface.\"\"\"\n self._gif_surface.write(encoder.rectangle(*args))\n\n def run(self, algo, maze, delay=5, trans_index=None,\n cmap=None, mcl=8, **kwargs):\n \"\"\"\n The entrance for running the animations.\n\n --------\n Parameters:\n\n algo: name of the algorithm.\n\n maze: an instance of the `Maze` class.\n\n delay: delay time between successive frames.\n\n trans_index: the transparent channel.\n `None` means there is no transparent color.\n\n cmap: a dict that maps the values of the cells in a maze\n to their color indices.\n\n mcl: see the doc for the lzw_compress.\n \"\"\"\n render = Render(cmap, mcl)\n control = encoder.graphics_control_block(delay, trans_index)\n for frame in algo(maze, render, **kwargs):\n self._gif_surface.write(control + frame)\n", "repo_name": "yo1995/Ultimate-emoticon-generator", "sub_path": "maze-with-text/gifmaze.py", "file_name": "gifmaze.py", "file_ext": "py", "file_size_in_byte": 11536, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "PIL.Image.Image", "line_number": 58, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 58, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 61, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 61, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 184, "usage_type": "call"}, {"api_name": "encoder.rectangle", "line_number": 187, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 198, "usage_type": "call"}, 
{"api_name": "PIL.Image.open", "line_number": 199, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 199, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 200, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 200, "usage_type": "name"}, {"api_name": "encoder.parse_image", "line_number": 202, "usage_type": "call"}, {"api_name": "encoder.screen_descriptor", "line_number": 242, "usage_type": "call"}, {"api_name": "encoder.loop_control_block", "line_number": 243, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 276, "usage_type": "call"}, {"api_name": "encoder.lzw_compress", "line_number": 276, "usage_type": "attribute"}, {"api_name": "encoder.image_descriptor", "line_number": 291, "usage_type": "call"}, {"api_name": "encoder.pause", "line_number": 320, "usage_type": "call"}, {"api_name": "encoder.rectangle", "line_number": 324, "usage_type": "call"}, {"api_name": "encoder.graphics_control_block", "line_number": 349, "usage_type": "call"}]} +{"seq_id": "16888743604", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\nimport os\n\n__name__ = 'CorrMatrix_ABC'\n\nrelease_info = {}\ninfopath = os.path.abspath(os.path.join(os.path.dirname(__file__),\n __name__, 'info.py'))\nwith open(infopath) as open_file:\n exec(open_file.read(), release_info)\n\nsetup(\n name = __name__,\n author = release_info['__author__'],\n author_email = release_info['__email__'],\n version = release_info['__version__'],\n url = 'https://github.com/emilleishida/CorrMatrix_ABC',\n packages = find_packages(),\n install_requires = release_info['__requires__'],\n description = release_info['__description__'],\n scripts = ['scripts/cov_matrix_definition/cm_likelihood.py']\n)\n", "repo_name": "emilleishida/CorrMatrix_ABC", "sub_path": "setup_noimport.py", "file_name": "setup_noimport.py", "file_ext": "py", "file_size_in_byte": 762, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "42", "api": [{"api_name": "os.path.abspath", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 10, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 15, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "73990493245", "text": "from typing import Self\nfrom time import sleep\n\nfrom fastapi import APIRouter, Depends, status\nfrom fastapi.exceptions import HTTPException\nfrom fastapi_cache.decorator import cache\n\nfrom app.core.celery_app import test_task\n\nrouter = APIRouter(prefix=\"/api/v1/celery/tasks\", tags=[\"v1\", \"Celery\"])\n\n\nclass MailMessage:\n \"\"\"\n Модель письма\n \"\"\"\n\n def __init__(self: Self, email: str, content: str) -> None:\n email_split = email.split(\"@\")\n\n if \"gmail\" in email_split or \"mail\" in email_split or \"yandex\" in email_split:\n self.email = email\n self.content = content\n else:\n raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=\"Низя\")\n\n\nasync def check_mail(email: str, content: str) -> str:\n \"\"\"\n Проверка почты на gmail, mail, yandex\n \"\"\"\n\n email_split = email.split(\"@\")\n\n if \"gmail\" in email_split or \"mail\" in email_split or \"yandex\" in email_split:\n return email\n else:\n raise 
HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=\"Not allowed\")\n\n\n@router.get(\"/send_email_with_class\")\n@cache(expire=90)\nasync def send_email_class(\n) -> bool:\n \"\"\"\n Send an email\n \"\"\"\n\n sleep(5)\n return True\n\n\n@router.post(\"/send_email_with_func\")\n# @cache(expire=90)\nasync def send_email_func(\n email: str = Depends(check_mail),\n) -> bool:\n \"\"\"\n Send an email\n \"\"\"\n\n return True\n\n\n@router.post(\"/celery_send_mail\")\nasync def celery_send_mail(\n email: MailMessage = Depends(MailMessage),\n) -> bool:\n \"\"\"\n Send an email using Celery\n \"\"\"\n\n test_task.delay(email=email.email, context=email.content)\n return True\n", "repo_name": "Ryboss/celery_fastapi", "sub_path": "app/endpoints/test_task.py", "file_name": "test_task.py", "file_ext": "py", "file_size_in_byte": 1751, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "42", "api": [{"api_name": "fastapi.APIRouter", "line_number": 10, "usage_type": "call"}, {"api_name": "typing.Self", "line_number": 18, "usage_type": "name"}, {"api_name": "fastapi.exceptions.HTTPException", "line_number": 25, "usage_type": "call"}, {"api_name": "fastapi.status.HTTP_403_FORBIDDEN", "line_number": 25, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 25, "usage_type": "name"}, {"api_name": "fastapi.exceptions.HTTPException", "line_number": 38, "usage_type": "call"}, {"api_name": "fastapi.status.HTTP_403_FORBIDDEN", "line_number": 38, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 38, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 49, "usage_type": "call"}, {"api_name": "fastapi_cache.decorator.cache", "line_number": 42, "usage_type": "call"}, {"api_name": "fastapi.Depends", "line_number": 56, "usage_type": "call"}, {"api_name": "fastapi.Depends", "line_number": 67, "usage_type": "call"}, {"api_name": "app.core.celery_app.test_task.delay", "line_number": 73, "usage_type": "call"}, {"api_name": "app.core.celery_app.test_task", "line_number": 73, "usage_type": "name"}]} +{"seq_id": "5803054664", "text": "import typing\nfrom typing import Any\n\nimport httpx\nfrom loguru import logger\n\nfrom integration import OpencostResourceConfig\nfrom port_ocean.context.event import event\nfrom port_ocean.utils import http_async_client\n\n\nclass OpenCostClient:\n def __init__(self, app_host: str):\n self.app_host = app_host\n self.http_client = http_async_client\n\n async def get_cost_allocation(self) -> list[dict[str, Any]]:\n \"\"\"Calls the OpenCost allocation endpoint to return data for cost and usage\n https://www.opencost.io/docs/integrations/api\n \"\"\"\n selector = typing.cast(OpencostResourceConfig, event.resource_config).selector\n params: dict[str, str] = {\n \"window\": selector.window,\n }\n if selector.aggregate:\n params[\"aggregate\"] = selector.aggregate\n if selector.step:\n params[\"step\"] = selector.step\n if selector.resolution:\n params[\"resolution\"] = selector.resolution\n\n try:\n response = await self.http_client.get(\n url=f\"{self.app_host}/allocation/compute\",\n params=params,\n )\n response.raise_for_status()\n return response.json()[\"data\"]\n except httpx.HTTPStatusError as e:\n logger.error(\n f\"HTTP error with status code: {e.response.status_code} and response text: {e.response.text}\"\n )\n raise\n", "repo_name": "port-labs/ocean", "sub_path": "integrations/opencost/client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 1446, 
"program_lang": "python", "lang": "en", "doc_type": "code", "stars": 35, "dataset": "github-code", "pt": "42", "api": [{"api_name": "port_ocean.utils.http_async_client", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 21, "usage_type": "call"}, {"api_name": "integration.OpencostResourceConfig", "line_number": 21, "usage_type": "argument"}, {"api_name": "port_ocean.context.event.event.resource_config", "line_number": 21, "usage_type": "attribute"}, {"api_name": "port_ocean.context.event.event", "line_number": 21, "usage_type": "name"}, {"api_name": "httpx.HTTPStatusError", "line_number": 39, "usage_type": "attribute"}, {"api_name": "loguru.logger.error", "line_number": 40, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 40, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "39114938661", "text": "\"\"\"this is the twisted application configuration (tac)-file.\n\nit holds the basic configuration for the charun app.\nplease specify urls and ports for the twisted UDP server and the CouchDB\ninstance you can also set the function that will be applied to incoming\nmessages on the UDP server.\n\"\"\"\n\nimport logging\n\nfrom twisted.application import internet, service\nfrom twisted.python import log\nfrom twisted.python.log import ILogObserver\nfrom twisted.python.logfile import DailyLogFile\n\nfrom charun import Charun\n\n\n## configuration parameters\n## Twisted configuration\nport = 9999\nhost = 'localhost'\nloglevel = logging.INFO\n#loglevel = logging.DEBUG\n\n# this is the initial forwarding function used in the UDP Server.\n# It is applied to incoming dicts\ninitial = lambda x: x\n\n## CouchDB configuration\ncouchdb_url = \"http://localhost:5984\"\ndb_name = \"charun\"\n## Test DB\ntest_db_name = \"test_charun\"\n\n## LOGGING\nlogfile = DailyLogFile(\"charun.log\", \"tmp\")\nlogname = \"charun\"\nlogging.basicConfig(stream=logfile,\n format=\"[%(asctime)s]:charun: %(levelname)s:%(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\")\nlogger = logging.getLogger(logname)\nlogger.setLevel(loglevel)\n\n# create the application service\napplication = service.Application(\"charun couchdb bridge\")\napplication.setComponent(ILogObserver, log.PythonLoggingObserver(logname).emit)\n\n#define the UDP server on the specified port and hand the handler-class in\nudp_service = internet.UDPServer(port, Charun(couchdb_url, db_name, initial))\n\n# this hooks the udp-service to the application\nudp_service.setServiceParent(application)\n# when started with twistd, the child services will be started automatically.\n\n\ndef test_initial(x):\n \"\"\"this function serves testing purposes\"\"\"\n for it in x.items():\n x[it[0]] = it[1] * 2\n return x\n", "repo_name": "kr1/charun", "sub_path": "charun_tac.py", "file_name": "charun_tac.py", "file_ext": "py", "file_size_in_byte": 1825, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "42", "api": [{"api_name": "logging.INFO", "line_number": 23, "usage_type": "attribute"}, {"api_name": "twisted.python.logfile.DailyLogFile", "line_number": 37, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 39, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 42, "usage_type": "call"}, {"api_name": "twisted.application.service.Application", "line_number": 46, "usage_type": "call"}, {"api_name": "twisted.application.service", "line_number": 46, "usage_type": "name"}, {"api_name": 
"twisted.python.log.ILogObserver", "line_number": 47, "usage_type": "argument"}, {"api_name": "twisted.python.log.PythonLoggingObserver", "line_number": 47, "usage_type": "call"}, {"api_name": "twisted.python.log", "line_number": 47, "usage_type": "name"}, {"api_name": "twisted.application.internet.UDPServer", "line_number": 50, "usage_type": "call"}, {"api_name": "twisted.application.internet", "line_number": 50, "usage_type": "name"}, {"api_name": "charun.Charun", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "35128014223", "text": "import functools\nclass Solution:\n def coinChange(self, coins: List[int], amount: int) -> int:\n @functools.cache\n def dfs(amount):\n if amount == 0:\n return 0\n if amount < 0:\n return -1\n mincnt = math.inf\n for coin in coins:\n cnt = dfs(amount - coin)\n if cnt == -1: continue\n mincnt = min(mincnt, cnt + 1)\n return mincnt if mincnt != math.inf else -1\n return dfs(amount)\n", "repo_name": "bboxlin/algorithm", "sub_path": "record/problems/coin_change/solution.py", "file_name": "solution.py", "file_ext": "py", "file_size_in_byte": 523, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "42", "api": [{"api_name": "functools.cache", "line_number": 4, "usage_type": "attribute"}]} +{"seq_id": "27238868306", "text": "from datetime import datetime, timedelta\nfrom django.db.models import Q\nfrom django.utils.timezone import now\n\n\ndef get_online_status(device_model):\n InfoDevice = device_model\n time_now = now()\n devices = InfoDevice.objects.all()\n filterd_dev = []\n for dev in devices:\n frq = dev.ping_frequency\n if dev.ping_time:\n if dev.ping_time >= time_now - timedelta(seconds=2 * frq):\n filterd_dev.append(dev.id)\n devices = InfoDevice.objects.filter(id__in=filterd_dev).all()\n return devices\n\n\ndef filtered_devices(device_model,\n bus_model,\n targets):\n InfoDevice = device_model\n Bus = bus_model\n\n mask = targets.get(\"mask\")\n bus_ids = targets.get(\"buses\", [])\n device_ids = targets.get(\"devices\", [])\n if mask == -1:\n devices = []\n busdev_id = []\n if device_ids:\n devices = [dev.id for dev in device_ids]\n if bus_ids:\n busdev_id = [bus.info_device_id for bus in bus_ids]\n devs = InfoDevice.objects.filter(\n Q(id__in=devices) | Q(id__in=busdev_id)).all()\n return devs\n\n elif mask == 0:\n response = InfoDevice.objects.all()\n return response\n\n elif mask == 1:\n response = get_online_status(device_model=device_model)\n return response\n return []\n", "repo_name": "xKern/Knav-InfotainmentDevice-Filter", "sub_path": "infodevfilter/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 1363, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "42", "api": [{"api_name": "django.utils.timezone.now", "line_number": 8, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "4214770940", "text": "\"\"\"Post process.\"\"\"\n\nimport argparse\nimport os\nimport numpy as np\nimport pandas as pd\nfrom mpi4py import MPI\nimport stk\nimport utilities\nfrom scipy.interpolate import griddata\n\n\ndef p0_printer(par):\n iproc = par.rank\n\n def printer(*args, **kwargs):\n if iproc == 0:\n print(*args, **kwargs)\n\n return printer\n\n\nif __name__ == \"__main__\":\n\n # Parse arguments\n parser = argparse.ArgumentParser(description=\"A simple post-processing tool\")\n 
parser.add_argument(\n \"-m\",\n \"--mfile\",\n help=\"Root name of files to postprocess\",\n required=True,\n type=str,\n )\n parser.add_argument(\"--auto_decomp\", help=\"Auto-decomposition\", action=\"store_true\")\n parser.add_argument(\n \"-v\",\n \"--vel_name\",\n help=\"Name of the velocity field\",\n default=\"velocity\",\n type=str,\n )\n parser.add_argument(\n \"--navg\", help=\"Number of times to average\", default=40, type=int\n )\n parser.add_argument(\n \"--flowthrough\", help=\"Flowthrough time (L/u)\", default=1.0, type=float\n )\n parser.add_argument(\n \"--factor\",\n help=\"Factor of flowthrough time between time steps used in average\",\n type=float,\n default=1.2,\n )\n args = parser.parse_args()\n\n fdir = os.path.dirname(args.mfile)\n\n comm = MPI.COMM_WORLD\n size = comm.Get_size()\n rank = comm.Get_rank()\n par = stk.Parallel.initialize()\n printer = p0_printer(par)\n\n mesh = stk.StkMesh(par)\n printer(\"Reading meta data for mesh: \", args.mfile)\n mesh.read_mesh_meta_data(args.mfile, auto_decomp=args.auto_decomp)\n printer(\"Done reading meta data\")\n\n printer(\"Loading bulk data for mesh: \", args.mfile)\n mesh.populate_bulk_data()\n printer(\"Done reading bulk data\")\n\n num_time_steps = mesh.stkio.num_time_steps\n max_time = mesh.stkio.max_time\n tsteps = np.array(mesh.stkio.time_steps)\n printer(f\"\"\"Num. time steps = {num_time_steps}\\nMax. time step = {max_time}\"\"\")\n\n # Figure out the times over which to average\n if args.factor > 0:\n tmp_tavg = np.sort(\n tsteps[-1] - args.flowthrough * args.factor * np.arange(args.navg)\n )\n dist = np.abs(np.array(tsteps)[:, np.newaxis] - tmp_tavg)\n idx = dist.argmin(axis=0)\n else:\n idx = np.arange(len(tsteps) - args.navg, len(tsteps))\n tavg = tsteps[idx]\n tavg_instantaneous = tsteps[idx[0] :]\n printer(\"Averaging the following steps:\")\n printer(tavg)\n\n # Extract time and spanwise average tau_wall on wall\n walldata = None\n for tstep in tavg_instantaneous:\n ftime, missing = mesh.stkio.read_defined_input_fields(tstep)\n printer(f\"Loading tau_wall fields for time: {ftime}\")\n\n coords = mesh.meta.coordinate_field\n wall = mesh.meta.get_part(\"wall\")\n sel = wall & mesh.meta.locally_owned_part\n tauw = mesh.meta.get_field(\"tau_wall\")\n tauwv = mesh.meta.get_field(\"tau_wall_vector\")\n pressure = mesh.meta.get_field(\"pressure\")\n names = [\"x\", \"y\", \"z\", \"tauw\", \"tauwx\", \"tauwy\", \"tauwz\", \"pressure\"]\n nnodes = sum(bkt.size for bkt in mesh.iter_buckets(sel, stk.StkRank.NODE_RANK))\n\n cnt = 0\n data = np.zeros((nnodes, len(names)))\n for bkt in mesh.iter_buckets(sel, stk.StkRank.NODE_RANK):\n xyz = coords.bkt_view(bkt)\n tw = tauw.bkt_view(bkt)\n twv = tauwv.bkt_view(bkt)\n pres = pressure.bkt_view(bkt)\n data[cnt : cnt + bkt.size, :] = np.hstack(\n (xyz, tw.reshape(-1, 1), twv, pres.reshape(-1, 1))\n )\n cnt += bkt.size\n\n if walldata is None:\n walldata = np.zeros(data.shape)\n walldata += data / len(tavg_instantaneous)\n\n lst = comm.gather(walldata, root=0)\n comm.Barrier()\n if rank == 0:\n df = pd.DataFrame(np.vstack(lst), columns=names)\n df[\"r\"] = np.sqrt(df.x ** 2 + df.y ** 2 + df.z ** 2)\n df[\"theta\"] = 180 - np.arccos(df.x / df.r) * 180 / np.pi\n wallname = os.path.join(fdir, \"wall.dat\")\n df.to_csv(wallname, index=False)\n", "repo_name": "marchdf/hybrid-amr-nalu-sphere", "sub_path": "pp.py", "file_name": "pp.py", "file_ext": "py", "file_size_in_byte": 4123, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": 
"42", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "mpi4py.MPI.COMM_WORLD", "line_number": 58, "usage_type": "attribute"}, {"api_name": "mpi4py.MPI", "line_number": 58, "usage_type": "name"}, {"api_name": "stk.Parallel.initialize", "line_number": 61, "usage_type": "call"}, {"api_name": "stk.Parallel", "line_number": 61, "usage_type": "attribute"}, {"api_name": "stk.StkMesh", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 83, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 86, "usage_type": "call"}, {"api_name": "stk.StkRank", "line_number": 105, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 108, "usage_type": "call"}, {"api_name": "stk.StkRank", "line_number": 109, "usage_type": "attribute"}, {"api_name": "numpy.hstack", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 120, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.arccos", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 128, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path", "line_number": 129, "usage_type": "attribute"}]} +{"seq_id": "15544582557", "text": "import csv\n\nfrom django.conf import settings\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom django.urls import reverse\nfrom django.http import HttpResponse, HttpResponseRedirect\n\nfrom googleapiclient.discovery import build\nfrom google.oauth2 import service_account\n\nfrom huxley.core.models import Registration\n\n\nclass RegistrationAdmin(admin.ModelAdmin):\n def get_rows(self):\n rows = []\n rows.append([\n \"Registration Time\", \"School Name\", \"Total Number of Delegates\",\n \"Beginners\", \"Intermediates\", \"Advanced\", \"Spanish Speakers\",\n \"Chinese Speakers\", \"Assignments Finalized\", \"Waivers Complete\",\n \"Delegate Fees Paid\", \"Delegate Fees Owed\",\n \"Paid Registration Fee?\", \"Invoice Sent\", \"Payment Type\", \"Country 1\", \"Country 2\", \"Country 3\",\n \"Country 4\", \"Country 5\", \"Country 6\", \"Country 7\", \"Country 8\",\n \"Country 9\", \"Country 10\", \"Committee Preferences\",\n \"Registration Comments\"\n ]) \n\n for registration in Registration.objects.all().order_by(\n 'school__name'):\n country_preferences = [\n str(cp)\n for cp in registration.country_preferences.all().order_by(\n 'countrypreference')\n ]\n country_preferences += [''] * (10 - len(country_preferences))\n committee_preferences = [\n ', '.join(cp.name\n for cp in registration.committee_preferences.all())\n ]\n payment_type_string = ['Credit Card' if registration.payment_type == 1 else 'Check']\n\n rows.append([\n str(field) for field in [\n 
registration.registered_at, registration.school.name,\n registration.num_beginner_delegates +\n registration.num_intermediate_delegates +\n registration.num_advanced_delegates,\n registration.num_beginner_delegates,\n registration.num_intermediate_delegates,\n registration.num_advanced_delegates,\n registration.num_spanish_speaking_delegates,\n registration.num_chinese_speaking_delegates, registration.\n assignments_finalized, registration.waivers_completed,\n registration.delegate_fees_paid, registration.\n delegate_fees_owed, registration.registration_fee_paid,\n registration.invoices_sent\n ]\n ] + payment_type_string +\n country_preferences + committee_preferences +\n [str(registration.registration_comments)])\n return rows\n\n def info(self, request):\n '''Returns a CSV file of all the registration information.'''\n registrations = HttpResponse(content_type='text/csv')\n registrations[\n 'Content-Disposition'] = 'attachment; filename=\"registration_info.csv\"'\n\n writer = csv.writer(registrations)\n\n for row in self.get_rows():\n writer.writerow(row)\n\n return registrations\n\n def sheets(self, request):\n if settings.SHEET_ID:\n SHEET_RANGE = 'Registration!A1:AA'\n # Store credentials\n creds = service_account.Credentials.from_service_account_file(\n settings.SERVICE_ACCOUNT_FILE, scopes=settings.SCOPES)\n\n data = self.get_rows()\n\n body = {\n 'values': data,\n }\n\n service = build('sheets', 'v4', credentials=creds)\n response = service.spreadsheets().values().clear(\n spreadsheetId=settings.SHEET_ID,\n range=SHEET_RANGE,\n ).execute()\n response = service.spreadsheets().values().update(\n spreadsheetId=settings.SHEET_ID,\n range=SHEET_RANGE,\n valueInputOption='USER_ENTERED',\n body=body).execute()\n\n return HttpResponseRedirect(\n reverse('admin:core_registration_changelist'))\n\n def get_urls(self):\n return super(RegistrationAdmin, self).get_urls() + [\n url(\n r'info',\n self.admin_site.admin_view(self.info),\n name='core_registration_info',\n ),\n url(\n r'sheets',\n self.admin_site.admin_view(self.sheets),\n name='core_registration_sheets',\n ),\n ]\n", "repo_name": "bmun/huxley", "sub_path": "huxley/core/admin/registration.py", "file_name": "registration.py", "file_ext": "py", "file_size_in_byte": 4542, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 34, "dataset": "github-code", "pt": "33", "api": [{"api_name": "django.contrib.admin.ModelAdmin", "line_number": 15, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 15, "usage_type": "name"}, {"api_name": "huxley.core.models.Registration.objects.all", "line_number": 29, "usage_type": "call"}, {"api_name": "huxley.core.models.Registration.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "huxley.core.models.Registration", "line_number": 29, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 66, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 70, "usage_type": "call"}, {"api_name": "django.conf.settings.SHEET_ID", "line_number": 78, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 78, "usage_type": "name"}, {"api_name": "google.oauth2.service_account.Credentials.from_service_account_file", "line_number": 81, "usage_type": "call"}, {"api_name": "google.oauth2.service_account.Credentials", "line_number": 81, "usage_type": "attribute"}, {"api_name": "google.oauth2.service_account", "line_number": 81, "usage_type": "name"}, {"api_name": "django.conf.settings.SERVICE_ACCOUNT_FILE", 
"line_number": 82, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 82, "usage_type": "name"}, {"api_name": "django.conf.settings.SCOPES", "line_number": 82, "usage_type": "attribute"}, {"api_name": "googleapiclient.discovery.build", "line_number": 90, "usage_type": "call"}, {"api_name": "django.conf.settings.SHEET_ID", "line_number": 92, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 92, "usage_type": "name"}, {"api_name": "django.conf.settings.SHEET_ID", "line_number": 96, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 96, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 101, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 102, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 106, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 111, "usage_type": "call"}]} +{"seq_id": "16578427665", "text": "import time, decimal\r\nfrom random import randint\r\nfrom collections import Counter\r\nimport monster as m\r\nimport areas as a\r\n\r\n\r\ndef game_start():\r\n\t\"\"\"Initial player creation\"\"\"\r\n\t\r\n\t# Gets player name and creates default player\r\n\tplayer_name = input(\"What is your name? \")\r\n\t\r\n\tplayer = m.Monster(player_name, 200, 200, 3, 10, 1.4, 0, 0)\r\n\t\r\n\treturn player\r\n\r\n\r\ndef get_command():\r\n\t\"\"\"Takes player input and returns as a list\"\"\"\r\n\t\r\n\t# Accepts player input as a string, then splits into list\r\n\t# for command interpretation. Single word commands have\r\n\t# \"__\" appended to avoid references to invalid indexes\r\n\tcommand = str(input(\"\\n>>> \")).lower()\r\n\tcommand = command.split(\" \")\r\n\tif len(command) == 1:\r\n\t\tcommand.append(\"__\")\r\n\t\tcommand.append(\"__\")\r\n\tif len(command) == 2:\r\n\t\tcommand.append(\"__\")\r\n\r\n\treturn command\r\n\r\n\r\ndef combat(player, current_area, monster):\r\n\t\"\"\"A function to handle combat between a player and monster\"\"\"\r\n\t\r\n\t# Sets decimal precision to 4 digits; avoids issues with\r\n\t# floating-point arithmatic, allowing number comparisons\r\n\tdecimal.getcontext().prec = 4\r\n\t\r\n\tif monster in current_area.monsters:\r\n\t\t\r\n\t\tprint(f\"You attack a {monster.name}!\\n\")\r\n\r\n\t\t# Combat timer initializations; set to round(Decimal()) because\r\n\t\t# it still wanted to use precision of 28\r\n\t\tc_time = 0\r\n\t\tp_attack_speed = round(decimal.Decimal(player.attack_speed), 1)\r\n\t\tm_attack_speed = round(decimal.Decimal(monster.attack_speed), 1)\r\n\t\r\n\t\t# Main combat loop\r\n\t\t# Program sleeps for .1 second intervals, increments combat time,\r\n\t\t# and performs attacks at specified times based on attack speeds\r\n\t\twhile player.current_hp > 0 and monster.current_hp > 0:\r\n\t\t\r\n\t\t\ttime.sleep(.1)\r\n\t\t\tc_time += decimal.Decimal(.1)\r\n\t\t\tp_dmg = randint(player.min_dmg, player.max_dmg)\r\n\t\t\tm_dmg = randint(monster.min_dmg, monster.max_dmg)\r\n\t\t\r\n\t\t\tif c_time % p_attack_speed == 0:\r\n\t\t\t\tprint(f\"You hit the {monster.name} for {p_dmg} points of damage!\\n\")\r\n\t\t\t\tmonster.current_hp -= p_dmg\r\n\t\t\t\r\n\t\t\tif c_time % m_attack_speed == 0:\r\n\t\t\t\tprint(f\"The {monster.name} hits you for {m_dmg} points of damage!\\n\")\r\n\t\t\t\tplayer.current_hp -= m_dmg\r\n\r\n\t\t# End of combat checks/outputs & rewards\r\n\t\tif player.current_hp <= 0:\r\n\t\t\tprint(\"You have been 
slain!\")\r\n\t\t\r\n\t\telif monster.current_hp <= 0:\r\n\t\t\tprint(f\"You have defeated the {monster.name} and gained \"\r\n\t\t\t\tf\"{monster.exp_value} experience points!\\n\"\r\n\t\t\t\tf\"The {monster.name} has dropped {monster.gold_value} gold!\")\r\n\t\t\tplayer.current_gold += monster.gold_value\r\n\t\t\tplayer.current_exp += monster.exp_value\r\n\t\t\r\n\t\t\t# Drop chance if monster has item, then drops into area\r\n\t\t\tif monster.item != None:\r\n\t\t\t\tdrop = randint(1, 100)\r\n\t\t\t\tif drop < monster.drop_chance:\r\n\t\t\t\t\tprint(f\"A {monster.item.name} fell from the monster!\")\r\n\t\t\t\t\tcurrent_area.items.append(monster.item)\r\n\t\t\t\t\t\r\n\t\t\t# Removes a defeated monster from the area\t\t\r\n\t\t\tcurrent_area.monsters.remove(monster)\r\n\t\t\r\n\t\t# Reset monster object hp to max\r\n\t\tmonster.current_hp = monster.max_hp\r\n\t\r\n\telif monster not in current_area.monsters:\r\n\t\tprint(f\"There is no {monster.name} here...\")\r\n\r\n\r\ndef inventory(player):\r\n\t\"\"\"A function to display contents of inventory\"\"\"\r\n\t\r\n\tprint(f\"-------------------------------\\n\"\r\n\t\tf\"| Inventory |\\n\"\r\n\t\tf\"-------------------------------\\n\"\r\n\t\tf\" Gold: {player.current_gold}\\n\"\r\n\t)\r\n\t\r\n\tif len(player.inventory_list) == 0:\r\n\t\tprint(\" No items\\n\")\r\n\t\r\n\tfor item in player.inventory_list:\r\n\t\tprint(f\"{item.name.title()}\\n\")\r\n\t\r\n\tprint(f\"-------------------------------\")\r\n\t\r\n\t\r\ndef char_sheet(player):\r\n\t\"\"\"A function to display character information\"\"\"\r\n\t\r\n\tprint(f\"-------------------------------\\n\"\r\n\t\tf\"| Character |\\n\"\r\n\t\tf\"-------------------------------\\n\"\r\n\t\tf\" Name: {player.name}\\n\"\r\n\t\tf\" EXP: {player.current_exp}\\n\"\r\n\t\tf\" HP: {player.current_hp}/{player.max_hp}\\n\"\r\n\t\tf\" Damage: {player.min_dmg}-{player.max_dmg}\\n\"\r\n\t\tf\" Attack Speed: {round(1/player.attack_speed,2)} per second\"\r\n\t)\r\n\t\r\n\tprint(f\"-------------------------------\")\r\n\r\n\r\ndef look(player, current_area):\r\n\t\"\"\"A function to display information about the current area\"\"\"\r\n\t\r\n\t# Assigns area object to current_area based on player positions\r\n\t\r\n\t\r\n\tprint(f\"{current_area.description}\")\r\n\t\r\n\t# Displays items in the area, if any\r\n\tif len(current_area.items) > 0:\r\n\t\tfor i in range(0, len(current_area.items)):\r\n\t\t\tprint(f\"You spot a {current_area.items[i].name}!\")\r\n\t\r\n\t# Displays monsters in the area, if any\r\n\tif len(current_area.monsters) > 0:\r\n\t\t\r\n\t\t# Counts number of unique monsters and stores in dictionary\r\n\t\tmonster_count = Counter(current_area.monsters)\r\n\t\t\r\n\t\t# Creates a list of keys and values for dictionary\r\n\t\t# to allow index reference\r\n\t\ttotal_count = list(monster_count.values())\r\n\t\tmonster = list(monster_count.keys())\r\n\t\t\r\n\t\tprint(\"You see \", end =\"\")\r\n\t\tfor i in range(0, len(total_count)):\r\n\r\n\t\t\t# Output when there is more than one of a specific monster\r\n\t\t\tif total_count[i] > 1:\r\n\t\t\t\tprint(f\"{total_count[i]} {monster[i].name}s\", end=\"\")\r\n\t\t\t\t\r\n\t\t\tif len(total_count) > 2 and i < len(total_count)-1:\r\n\t\t\t\tprint(\", \", end=\"\")\r\n\t\t\t\r\n\t\t\telif len(total_count) > 1 and total_count[i] == total_count[-1]:\r\n\t\t\t\tprint(\" and \", end=\"\")\r\n\t\t\t\r\n\t\t\t# Output when there is only a single monster\r\n\t\t\tif total_count[i] == 1:\r\n\t\t\t\tprint(f\"a {monster[i].name}\", 
end=\"\")\r\n\t\t\t\t\r\n\t\tprint(\" here!\")\r\n\t\t\t\r\n#\t\tfor monster in range(0, len(current_area.monsters)):\r\n#\t\t\tprint(f\"There is a {current_area.monsters[monster].name} here...\")\r\n\t\r\n\tprint(f\"Exits: {current_area.exits}\")\r\n\r\n\t\r\ndef move(player, current_area, direction):\r\n\t\"\"\"A function to move between areas\"\"\"\r\n\r\n\tdirections = {\r\n\t\t\"n\": 1,\r\n\t\t\"s\": -1,\r\n\t\t\"e\": 1,\r\n\t\t\"w\": -1,\r\n\t\t\"u\": 1,\r\n\t\t\"d\": -1\r\n\t\t}\r\n\t\r\n\t# Updates player x/y/z position\r\n\tif direction in current_area.exits:\r\n\t\tif direction == \"n\" or direction == \"s\":\r\n\t\t\tplayer.y_pos += directions[direction]\r\n\t\telif direction == \"e\" or direction == \"w\":\r\n\t\t\tplayer.x_pos += directions[direction]\r\n\t\telse:\r\n\t\t\tplayer.z_pos += directions[direction]\r\n\r\n\t\t# Updates current_area and performs look()\r\n\t\tcurrent_area = a.area_list[(player.x_pos, player.y_pos, player.z_pos)]\r\n\t\tlook(player, current_area)\r\n\t\treturn current_area\r\n\telse:\r\n\t\tprint(f\"You cannot go {direction.title()} here.\")\r\n\t\treturn current_area\r\n\r\n\r\ndef get(player, current_area, item):\r\n\t\"\"\"A function to pick items up\"\"\"\r\n\r\n\t# When no target is supplied, function gets called with \"get\"\r\n\t# as the target. This was the easiest way to throw an error.\r\n\t# Also throws out non-item objects being passed.\r\n\tif item not in current_area.items:\r\n\t\tprint(f\"There are no {item.name}s in this area.\")\r\n\t\r\n\telif item in current_area.items:\r\n\t\tprint(f\"You picked up the {item.name}.\")\r\n\t\tplayer.inventory_list.append(item)\r\n\t\tcurrent_area.items.remove(item)\r\n\r\n\r\ndef use(player, item):\r\n\t\"\"\"A function to use items in player inventory\"\"\"\r\n\t\r\n\t# Makes sure the item is in the player's inventory and can be used\r\n\tif item not in player.inventory_list or item.use == False:\r\n\t\tprint(f\"You don't have that.\")\r\n\t\r\n\telif item in player.inventory_list and item.use == True:\r\n\t\tfor k, v in item.__dict__.items():\r\n\t\t\tif k == \"heal_value\" and v > 0:\r\n\t\t\t\tif player.current_hp == player.max_hp:\r\n\t\t\t\t\tprint(\"You are already at maximum health.\")\r\n\t\t\t\t\tplayer.inventory_list.append(item)\r\n\t\t\t\telif player.current_hp > player.max_hp - v:\r\n\t\t\t\t\tprint(f\"You healed for {player.max_hp-player.current_hp}hp.\")\r\n\t\t\t\t\tplayer.current_hp = player.max_hp\r\n\t\t\t\telif player.current_hp < player.max_hp:\r\n\t\t\t\t\tplayer.current_hp += v\r\n\t\t\t\t\tprint(f\"You healed for {v}hp.\")\r\n\r\n\t\t\t\t\t\r\n\t\tplayer.inventory_list.remove(item)\r\n\r\n\r\ndef examine():\r\n\t\"\"\"A function to get information about an item\"\"\"\r\n\t\r\n\r\ndef help_sheet(player):\r\n\t\"\"\"A function to list commands\"\"\"\r\n\t\r\n\tprint(f\"-------------------------------\\n\"\r\n\t\tf\"| Help |\\n\"\r\n\t\tf\"-------------------------------\\n\"\r\n\t\tf\"\\n\"\r\n\t\tf\"Attack Commands\\t\\tItem Commands:\\n\"\r\n\t\tf\"kill \\t\\tget \\n\"\r\n\t\tf\"attack \\t\\tuse \\n\"\r\n\t\tf\"fight \\n\"\r\n\t\tf\"\\n\"\r\n\t\tf\"Movement Commands:\\tMisc Commands:\\n\"\r\n\t\tf\"North:\\tn\\t\\tLook:\\tl\\n\"\r\n\t\tf\"South:\\ts\\t\\tCharacter Info: c\\n\"\r\n\t\tf\"East:\\te\\t\\tExamine: e \\n\"\r\n\t\tf\"West:\\tw\\n\"\r\n\t\tf\"Up:\\tu\\n\"\r\n\t\tf\"Down:\\td\\n\")\r\n", "repo_name": "careytyl/adventure-game", "sub_path": "game_functions.py", "file_name": "game_functions.py", "file_ext": "py", "file_size_in_byte": 8225, "program_lang": "python", 
"lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "monster.Monster", "line_number": 14, "usage_type": "call"}, {"api_name": "decimal.getcontext", "line_number": 41, "usage_type": "call"}, {"api_name": "monster.name", "line_number": 45, "usage_type": "attribute"}, {"api_name": "decimal.Decimal", "line_number": 50, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 51, "usage_type": "call"}, {"api_name": "monster.attack_speed", "line_number": 51, "usage_type": "attribute"}, {"api_name": "monster.current_hp", "line_number": 56, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 58, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 59, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 60, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 61, "usage_type": "call"}, {"api_name": "monster.min_dmg", "line_number": 61, "usage_type": "attribute"}, {"api_name": "monster.max_dmg", "line_number": 61, "usage_type": "attribute"}, {"api_name": "monster.name", "line_number": 64, "usage_type": "attribute"}, {"api_name": "monster.current_hp", "line_number": 65, "usage_type": "attribute"}, {"api_name": "monster.name", "line_number": 68, "usage_type": "attribute"}, {"api_name": "monster.current_hp", "line_number": 75, "usage_type": "attribute"}, {"api_name": "monster.name", "line_number": 76, "usage_type": "attribute"}, {"api_name": "monster.exp_value", "line_number": 77, "usage_type": "attribute"}, {"api_name": "monster.name", "line_number": 78, "usage_type": "attribute"}, {"api_name": "monster.gold_value", "line_number": 78, "usage_type": "attribute"}, {"api_name": "monster.gold_value", "line_number": 79, "usage_type": "attribute"}, {"api_name": "monster.exp_value", "line_number": 80, "usage_type": "attribute"}, {"api_name": "monster.item", "line_number": 83, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 84, "usage_type": "call"}, {"api_name": "monster.drop_chance", "line_number": 85, "usage_type": "attribute"}, {"api_name": "monster.item", "line_number": 86, "usage_type": "attribute"}, {"api_name": "monster.item", "line_number": 87, "usage_type": "attribute"}, {"api_name": "monster.current_hp", "line_number": 93, "usage_type": "attribute"}, {"api_name": "monster.max_hp", "line_number": 93, "usage_type": "attribute"}, {"api_name": "monster.name", "line_number": 96, "usage_type": "attribute"}, {"api_name": "collections.Counter", "line_number": 150, "usage_type": "call"}, {"api_name": "areas.area_list", "line_number": 204, "usage_type": "attribute"}]} +{"seq_id": "74359316925", "text": "#!/usr/bin/env python3\n\nimport json, os\n\nfrom common import *\n\nfrom test_asmjs import test_asmjs\nfrom test_c import test_c\nfrom test_d import test_d\n\ndef main( verbose = True ):\n\n result_li = []\n\n if verbose: print( os.linesep + '.' )\n result_li.extend( test_asmjs( verbose = verbose ) )\n\n if verbose: print( os.linesep + '.' )\n result_li.extend( test_c( verbose = verbose ) )\n \n if verbose: print( os.linesep + '.' )\n result_li.extend( test_d( verbose = verbose ) )\n \n\n s = summary( result_li )\n \n if verbose: \n print( os.linesep + '.' 
)\n print()\n print( '--- all done ---' )\n print( s[ MESSAGE ] )\n\n if s[ N_FAILURE ] > 0:\n if verbose: print( 'Failure(s):' )\n faili = filter( lambda x: not x[ OK ], result_li )\n if verbose: print( os.linesep.join( map( json.dumps, faili ) ) )\n\n return s\n\nif __name__ == '__main__':\n main()\n", "repo_name": "glathoud/flatorize", "sub_path": "test/test_all.py", "file_name": "test_all.py", "file_ext": "py", "file_size_in_byte": 928, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 18, "dataset": "github-code", "pt": "42", "api": [{"api_name": "os.linesep", "line_number": 15, "usage_type": "attribute"}, {"api_name": "test_asmjs.test_asmjs", "line_number": 16, "usage_type": "call"}, {"api_name": "os.linesep", "line_number": 18, "usage_type": "attribute"}, {"api_name": "test_c.test_c", "line_number": 19, "usage_type": "call"}, {"api_name": "os.linesep", "line_number": 21, "usage_type": "attribute"}, {"api_name": "test_d.test_d", "line_number": 22, "usage_type": "call"}, {"api_name": "os.linesep", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.linesep.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.linesep", "line_number": 36, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 36, "usage_type": "attribute"}]} +{"seq_id": "30054445425", "text": "# coverage: ignore\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport itertools\n\nimport numpy as np\nimport pytest\n\nfrom openfermion.resource_estimates import HAVE_DEPS_FOR_RESOURCE_ESTIMATES\n\nif HAVE_DEPS_FOR_RESOURCE_ESTIMATES:\n from pyscf.pbc import mp\n\n from openfermion.resource_estimates.pbc.testing import make_diamond_113_szv\n from openfermion.resource_estimates.pbc.df.df_integrals import DFABKpointIntegrals\n from openfermion.resource_estimates.pbc.hamiltonian import cholesky_from_df_ints\n\n\n@pytest.mark.skipif(not HAVE_DEPS_FOR_RESOURCE_ESTIMATES, reason='pyscf and/or jax not installed.')\ndef test_df_amat_bmat():\n mf = make_diamond_113_szv()\n mymp = mp.KMP2(mf)\n nmo = mymp.nmo\n\n Luv = cholesky_from_df_ints(mymp) # [kpt, kpt, naux, nao, nao]\n dfk_inst = DFABKpointIntegrals(Luv.copy(), mf)\n naux = dfk_inst.naux\n\n dfk_inst.double_factorize()\n\n nkpts = len(mf.kpts)\n for qidx, kidx in itertools.product(range(nkpts), repeat=2):\n Amats, Bmats = dfk_inst.build_A_B_n_q_k_from_chol(qidx, kidx)\n # check if Amats and Bmats have the correct size\n assert Amats.shape == (naux, 2 * nmo, 2 * nmo)\n assert Bmats.shape == (naux, 2 * nmo, 2 * nmo)\n\n # check if Amats and Bmats have the correct symmetry--Hermitian\n assert np.allclose(Amats, Amats.conj().transpose(0, 2, 1))\n assert np.allclose(Bmats, Bmats.conj().transpose(0, 2, 1))\n\n # check if we can recover the Cholesky vector from Amat\n k_minus_q_idx = dfk_inst.k_transfer_map[qidx, kidx]\n test_chol = dfk_inst.build_chol_part_from_A_B(kidx, qidx, Amats, Bmats)\n assert np.allclose(test_chol, dfk_inst.chol[kidx, k_minus_q_idx])\n\n # check if factorized is working numerically exact case\n assert 
np.allclose(dfk_inst.amat_n_mats[kidx, qidx], Amats)\n assert np.allclose(dfk_inst.bmat_n_mats[kidx, qidx], Bmats)\n\n for nn in range(Amats.shape[0]):\n w, v = np.linalg.eigh(Amats[nn, :, :])\n non_zero_idx = np.where(w > 1.0e-4)[0]\n w = w[non_zero_idx]\n v = v[:, non_zero_idx]\n assert len(w) <= 2 * nmo\n\n for qidx in range(nkpts):\n for nn in range(naux):\n for kidx in range(nkpts):\n eigs_a_fixed_n_q = dfk_inst.amat_lambda_vecs[kidx, qidx, nn]\n eigs_b_fixed_n_q = dfk_inst.bmat_lambda_vecs[kidx, qidx, nn]\n assert len(eigs_a_fixed_n_q) <= 2 * nmo\n assert len(eigs_b_fixed_n_q) <= 2 * nmo\n\n for kidx in range(nkpts):\n for kpidx in range(nkpts):\n for qidx in range(nkpts):\n kmq_idx = dfk_inst.k_transfer_map[qidx, kidx]\n kpmq_idx = dfk_inst.k_transfer_map[qidx, kpidx]\n exact_eri_block = dfk_inst.get_eri_exact([kidx, kmq_idx, kpmq_idx, kpidx])\n test_eri_block = dfk_inst.get_eri([kidx, kmq_idx, kpmq_idx, kpidx])\n assert np.allclose(exact_eri_block, test_eri_block)\n", "repo_name": "quantumlib/OpenFermion", "sub_path": "src/openfermion/resource_estimates/pbc/df/df_integrals_test.py", "file_name": "df_integrals_test.py", "file_ext": "py", "file_size_in_byte": 3510, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1426, "dataset": "github-code", "pt": "33", "api": [{"api_name": "openfermion.resource_estimates.HAVE_DEPS_FOR_RESOURCE_ESTIMATES", "line_number": 20, "usage_type": "name"}, {"api_name": "openfermion.resource_estimates.pbc.testing.make_diamond_113_szv", "line_number": 30, "usage_type": "call"}, {"api_name": "pyscf.pbc.mp.KMP2", "line_number": 31, "usage_type": "call"}, {"api_name": "pyscf.pbc.mp", "line_number": 31, "usage_type": "name"}, {"api_name": "openfermion.resource_estimates.pbc.hamiltonian.cholesky_from_df_ints", "line_number": 34, "usage_type": "call"}, {"api_name": "openfermion.resource_estimates.pbc.df.df_integrals.DFABKpointIntegrals", "line_number": 35, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.linalg.eigh", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 61, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 82, "usage_type": "call"}, {"api_name": "pytest.mark.skipif", "line_number": 28, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 28, "usage_type": "attribute"}, {"api_name": "openfermion.resource_estimates.HAVE_DEPS_FOR_RESOURCE_ESTIMATES", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "35251544397", "text": "#!/usr/bin/python3\n\nimport getopt\nimport socket\nfrom sys import argv\nimport multiprocessing\n\n\ndef invert_message(client_socket, address):\n while True:\n data = client_socket.recv(1024)\n received = data.decode()\n if received == \"\":\n break\n msg_reversed = received[::-1]\n client_socket.send(msg_reversed.encode())\n print(\"Address: %s \" % str(address), \"Received correctly: \" + data.decode())\n\n\ndef main():\n\n port = None\n\n (options, args) = getopt.getopt(argv[1:], 'p:', [])\n\n for (opts, arg) in options:\n if opts == 
'-p':\n port = int(arg)\n\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n host = \"\"\n server_socket.bind((host, port))\n server_socket.listen(5)\n print('SERVER LISTENING ...')\n\n while True:\n client_socket, address = server_socket.accept()\n client_process = multiprocessing.Process(target=invert_message, args=(client_socket, address))\n client_process.start()\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "YoelRoger/Computacion2", "sub_path": "ej17_echo_inv/ej17_server.py", "file_name": "ej17_server.py", "file_ext": "py", "file_size_in_byte": 1032, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "42", "api": [{"api_name": "getopt.getopt", "line_number": 24, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 24, "usage_type": "name"}, {"api_name": "socket.socket", "line_number": 30, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 30, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 30, "usage_type": "attribute"}, {"api_name": "multiprocessing.Process", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "26025573069", "text": "import copy\nimport math\nimport random\nimport time\nfrom collections import OrderedDict, defaultdict\nfrom typing import Union, List\n\nimport numpy as np\nimport torch\nfrom matplotlib import pyplot as plt\nfrom torch import nn\nfrom torch.optim import *\nfrom torch.optim.lr_scheduler import *\nfrom torch.utils.data import DataLoader\nfrom torchprofile import profile_macs\nfrom torchvision.datasets import *\nfrom torchvision.transforms import *\nfrom tqdm.auto import tqdm\n\nfrom torchprofile import profile_macs\n\n\n\n\nByte = 8\nKiB = 1024 * Byte\nMiB = 1024 * KiB\nGiB = 1024 * MiB\n\n\nrandom.seed(0)\nnp.random.seed(0)\ntorch.manual_seed(0)\n\n# def recover_model(checkpoint):\n# recover_model = lambda: model.load_state_dict(checkpoint['state_dict'])\n\ndef train(\n model: nn.Module,\n dataloader: DataLoader,\n criterion: nn.Module,\n optimizer: Optimizer,\n scheduler: LambdaLR,\n callbacks = None\n) -> None:\n model.train()\n\n for inputs, targets in tqdm(dataloader, desc='train', leave=False):\n # Move the data from CPU to GPU\n inputs = inputs.cuda()\n targets = targets.cuda()\n\n # Reset the gradients (from the last iteration)\n optimizer.zero_grad()\n\n # Forward inference\n outputs = model(inputs)\n loss = criterion(outputs, targets)\n\n # Backward propagation\n loss.backward()\n\n # Update optimizer and LR scheduler\n optimizer.step()\n scheduler.step()\n\n if callbacks is not None:\n for callback in callbacks:\n callback()\n\n\n@torch.inference_mode()\ndef evaluate(\n model: nn.Module,\n dataloader: DataLoader, \n verbose=True,\n) -> float:\n model.eval()\n\n num_samples = 0\n num_correct = 0\n\n for inputs, targets in tqdm(dataloader, desc=\"eval\", leave=False, \n disable=not verbose):\n # Move the data from CPU to GPU\n inputs = inputs.cuda()\n targets = targets.cuda()\n\n # Inference\n outputs = model(inputs)\n\n # Convert logits to class indices\n outputs = outputs.argmax(dim=1)\n\n # Update metrics\n num_samples += targets.size(0)\n num_correct += (outputs == targets).sum()\n\n return (num_correct / num_samples * 100).item()\n\ndef get_model_macs(model, inputs) -> int:\n return profile_macs(model, inputs)\n\n\ndef get_sparsity(tensor: torch.Tensor) -> float:\n \"\"\"\n calculate the sparsity of the given tensor\n sparsity = #zeros / #elements = 1 - #nonzeros / #elements\n \"\"\"\n return 1 - 
float(tensor.count_nonzero()) / tensor.numel()\n\n\ndef get_model_sparsity(model: nn.Module) -> float:\n \"\"\"\n calculate the sparsity of the given model\n sparsity = #zeros / #elements = 1 - #nonzeros / #elements\n \"\"\"\n num_nonzeros, num_elements = 0, 0\n for param in model.parameters():\n num_nonzeros += param.count_nonzero()\n num_elements += param.numel()\n return 1 - float(num_nonzeros) / num_elements\n\ndef get_num_parameters(model: nn.Module, count_nonzero_only=False) -> int:\n \"\"\"\n calculate the total number of parameters of model\n :param count_nonzero_only: only count nonzero weights\n \"\"\"\n num_counted_elements = 0\n for param in model.parameters():\n if count_nonzero_only:\n num_counted_elements += param.count_nonzero()\n else:\n num_counted_elements += param.numel()\n return num_counted_elements\n\n\ndef get_model_size(model: nn.Module, data_width=32, count_nonzero_only=False) -> int:\n \"\"\"\n calculate the model size in bits\n :param data_width: #bits per element\n :param count_nonzero_only: only count nonzero weights\n \"\"\"\n return get_num_parameters(model, count_nonzero_only) * data_width\n\n\n\n\ndef plot_weight_distribution(model, bins=256, count_nonzero_only=False):\n fig, axes = plt.subplots(3,3, figsize=(10, 6))\n axes = axes.ravel()\n plot_index = 0\n for name, param in model.named_parameters():\n if param.dim() > 1:\n ax = axes[plot_index]\n if count_nonzero_only:\n param_cpu = param.detach().view(-1).cpu()\n param_cpu = param_cpu[param_cpu != 0].view(-1)\n ax.hist(param_cpu, bins=bins, density=True, \n color = 'blue', alpha = 0.5)\n else:\n ax.hist(param.detach().view(-1).cpu(), bins=bins, density=True, \n color = 'blue', alpha = 0.5)\n ax.set_xlabel(name)\n ax.set_ylabel('density')\n plot_index += 1\n fig.suptitle('Histogram of Weights')\n fig.tight_layout()\n fig.subplots_adjust(top=0.925)\n plt.show()\n\n\ndef fine_grained_prune(tensor: torch.Tensor, sparsity : float) -> torch.Tensor:\n \"\"\"\n magnitude-based pruning for single tensor\n :param tensor: torch.(cuda.)Tensor, weight of conv/fc layer\n :param sparsity: float, pruning sparsity\n sparsity = #zeros / #elements = 1 - #nonzeros / #elements\n :return:\n torch.(cuda.)Tensor, mask for zeros\n \"\"\"\n sparsity = min(max(0.0, sparsity), 1.0)\n if sparsity == 1.0:\n tensor.zero_()\n return torch.zeros_like(tensor)\n elif sparsity == 0.0:\n return torch.ones_like(tensor)\n\n num_elements = tensor.numel()\n\n ##################### YOUR CODE STARTS HERE #####################\n # Step 1: calculate the #zeros (please use round())\n num_zeros = round(num_elements * sparsity)\n # Step 2: calculate the importance of weight\n importance = tensor.abs()\n # Step 3: calculate the pruning threshold\n threshold = importance.view(-1).kthvalue(num_zeros).values\n # Step 4: get binary mask (1 for nonzeros, 0 for zeros)\n mask = torch.gt(importance, threshold)\n ##################### YOUR CODE ENDS HERE #######################\n\n # Step 5: apply mask to prune the tensor\n tensor.mul_(mask)\n\n return mask\n\n\nclass FineGrainedPruner:\n def __init__(self, model, sparsity_dict):\n self.masks = FineGrainedPruner.prune(model, sparsity_dict)\n\n @torch.no_grad()\n def apply(self, model):\n for name, param in model.named_parameters():\n if name in self.masks:\n param *= self.masks[name]\n\n @staticmethod\n @torch.no_grad()\n def prune(model, sparsity_dict):\n masks = dict()\n for name, param in model.named_parameters():\n if param.dim() > 1: # we only prune conv and fc weights\n masks[name] = 
fine_grained_prune(param, sparsity_dict[name])\n return masks\n\n\n@torch.no_grad()\ndef sensitivity_scan(model, dataloader, scan_step=0.1, scan_start=0.4, scan_end=1.0, verbose=True):\n sparsities = np.arange(start=scan_start, stop=scan_end, step=scan_step)\n accuracies = []\n named_conv_weights = [(name, param) for (name, param) \\\n in model.named_parameters() if param.dim() > 1]\n for i_layer, (name, param) in enumerate(named_conv_weights):\n param_clone = param.detach().clone()\n accuracy = []\n for sparsity in tqdm(sparsities, desc=f'scanning {i_layer}/{len(named_conv_weights)} weight - {name}'):\n fine_grained_prune(param.detach(), sparsity=sparsity)\n acc = evaluate(model, dataloader, verbose=False)\n if verbose:\n print(f'\\r sparsity={sparsity:.2f}: accuracy={acc:.2f}%', end='')\n # restore\n param.copy_(param_clone)\n accuracy.append(acc)\n if verbose:\n print(f'\\r sparsity=[{\",\".join([\"{:.2f}\".format(x) for x in sparsities])}]: accuracy=[{\", \".join([\"{:.2f}%\".format(x) for x in accuracy])}]', end='')\n accuracies.append(accuracy)\n return sparsities, accuracies\n\n\ndef plot_sensitivity_scan(model, sparsities, accuracies, dense_model_accuracy):\n lower_bound_accuracy = 100 - (100 - dense_model_accuracy) * 1.5\n fig, axes = plt.subplots(3, int(math.ceil(len(accuracies) / 3)),figsize=(15,8))\n axes = axes.ravel()\n plot_index = 0\n for name, param in model.named_parameters():\n if param.dim() > 1:\n ax = axes[plot_index]\n curve = ax.plot(sparsities, accuracies[plot_index])\n line = ax.plot(sparsities, [lower_bound_accuracy] * len(sparsities))\n ax.set_xticks(np.arange(start=0.4, stop=1.0, step=0.1))\n ax.set_ylim(80, 95)\n ax.set_title(name)\n ax.set_xlabel('sparsity')\n ax.set_ylabel('top-1 accuracy')\n ax.legend([\n 'accuracy after pruning',\n f'{lower_bound_accuracy / dense_model_accuracy * 100:.0f}% of dense model accuracy'\n ])\n ax.grid(axis='x')\n plot_index += 1\n fig.suptitle('Sensitivity Curves: Validation Accuracy vs. Pruning Sparsity')\n fig.tight_layout()\n fig.subplots_adjust(top=0.925)\n plt.show()\n\n\ndef get_num_channels_to_keep(channels: int, prune_ratio: float) -> int:\n \"\"\"A function to calculate the number of layers to PRESERVE after pruning\n Note that preserve_rate = 1. - prune_ratio\n \"\"\"\n ##################### YOUR CODE STARTS HERE #####################\n return int(round(channels * (1. 
- prune_ratio)))\n ##################### YOUR CODE ENDS HERE #####################\n\n@torch.no_grad()\ndef channel_prune(model: nn.Module, \n prune_ratio: Union[List, float]) -> nn.Module:\n \"\"\"Apply channel pruning to each of the conv layer in the backbone\n Note that for prune_ratio, we can either provide a floating-point number,\n indicating that we use a uniform pruning rate for all layers, or a list of\n numbers to indicate per-layer pruning rate.\n \"\"\"\n # sanity check of provided prune_ratio\n assert isinstance(prune_ratio, (float, list))\n n_conv = len([m for m in model.backbone if isinstance(m, nn.Conv2d)])\n # note that for the ratios, it affects the previous conv output and next\n # conv input, i.e., conv0 - ratio0 - conv1 - ratio1-...\n if isinstance(prune_ratio, list):\n assert len(prune_ratio) == n_conv - 1\n else: # convert float to list\n prune_ratio = [prune_ratio] * (n_conv - 1)\n\n # we prune the convs in the backbone with a uniform ratio\n model = copy.deepcopy(model) # prevent overwrite\n # we only apply pruning to the backbone features\n all_convs = [m for m in model.backbone if isinstance(m, nn.Conv2d)]\n all_bns = [m for m in model.backbone if isinstance(m, nn.BatchNorm2d)]\n # apply pruning. we naively keep the first k channels\n assert len(all_convs) == len(all_bns)\n for i_ratio, p_ratio in enumerate(prune_ratio):\n prev_conv = all_convs[i_ratio]\n prev_bn = all_bns[i_ratio]\n next_conv = all_convs[i_ratio + 1]\n original_channels = prev_conv.out_channels # same as next_conv.in_channels\n n_keep = get_num_channels_to_keep(original_channels, p_ratio)\n\n # prune the output of the previous conv and bn\n prev_conv.weight.set_(prev_conv.weight.detach()[:n_keep])\n prev_bn.weight.set_(prev_bn.weight.detach()[:n_keep])\n prev_bn.bias.set_(prev_bn.bias.detach()[:n_keep])\n prev_bn.running_mean.set_(prev_bn.running_mean.detach()[:n_keep])\n prev_bn.running_var.set_(prev_bn.running_var.detach()[:n_keep])\n\n # prune the input of the next conv (hint: just one line of code)\n ##################### YOUR CODE STARTS HERE #####################\n next_conv.weight.set_(next_conv.weight.detach()[:, :n_keep])\n ##################### YOUR CODE ENDS HERE #####################\n\n return model\n \n\n\n# function to sort the channels from important to non-important\ndef get_input_channel_importance(weight):\n in_channels = weight.shape[1]\n importances = []\n # compute the importance for each input channel\n for i_c in range(weight.shape[1]):\n channel_weight = weight.detach()[:, i_c]\n ##################### YOUR CODE STARTS HERE #####################\n importance = torch.norm(channel_weight)\n ##################### YOUR CODE ENDS HERE #####################\n importances.append(importance.view(1))\n return torch.cat(importances)\n\n@torch.no_grad()\ndef apply_channel_sorting(model):\n model = copy.deepcopy(model) # do not modify the original model\n # fetch all the conv and bn layers from the backbone\n all_convs = [m for m in model.backbone if isinstance(m, nn.Conv2d)]\n all_bns = [m for m in model.backbone if isinstance(m, nn.BatchNorm2d)]\n # iterate through conv layers\n for i_conv in range(len(all_convs) - 1):\n # each channel sorting index, we need to apply it to:\n # - the output dimension of the previous conv\n # - the previous BN layer\n # - the input dimension of the next conv (we compute importance here)\n prev_conv = all_convs[i_conv]\n prev_bn = all_bns[i_conv]\n next_conv = all_convs[i_conv + 1]\n # note that we always compute the importance according to input 
channels\n importance = get_input_channel_importance(next_conv.weight)\n # sorting from large to small\n sort_idx = torch.argsort(importance, descending=True) \n\n # apply to previous conv and its following bn\n prev_conv.weight.copy_(torch.index_select(\n prev_conv.weight.detach(), 0, sort_idx))\n for tensor_name in ['weight', 'bias', 'running_mean', 'running_var']:\n tensor_to_apply = getattr(prev_bn, tensor_name)\n tensor_to_apply.copy_(\n torch.index_select(tensor_to_apply.detach(), 0, sort_idx)\n )\n \n # apply to the next conv input (hint: one line of code)\n ##################### YOUR CODE STARTS HERE #####################\n next_conv.weight.copy_(\n torch.index_select(next_conv.weight.detach(), 1, sort_idx))\n ##################### YOUR CODE ENDS HERE #####################\n\n return model\n\n\n@torch.no_grad()\ndef measure_latency(model, dummy_input, n_warmup=20, n_test=100):\n model.eval()\n # warmup\n for _ in range(n_warmup):\n _ = model(dummy_input)\n # real test\n t1 = time.time()\n for _ in range(n_test):\n _ = model(dummy_input)\n t2 = time.time()\n return (t2 - t1) / n_test # average latency\n", "repo_name": "satabios/torp", "sub_path": "build/lib/torp/helper.py", "file_name": "helper.py", "file_ext": "py", "file_size_in_byte": 14081, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "random.seed", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 32, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 39, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 40, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 41, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 41, "usage_type": "name"}, {"api_name": "tqdm.auto.tqdm", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 74, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 74, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 75, "usage_type": "name"}, {"api_name": "tqdm.auto.tqdm", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.inference_mode", "line_number": 72, "usage_type": "call"}, {"api_name": "torchprofile.profile_macs", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 105, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 113, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 113, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 124, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 124, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 138, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 138, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 150, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 170, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 170, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 173, "usage_type": "attribute"}, {"api_name": 
"torch.zeros_like", "line_number": 185, "usage_type": "call"}, {"api_name": "torch.ones_like", "line_number": 187, "usage_type": "call"}, {"api_name": "torch.gt", "line_number": 199, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 212, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 230, "usage_type": "call"}, {"api_name": "tqdm.auto.tqdm", "line_number": 237, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 228, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 253, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 253, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 261, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 275, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 275, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 287, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 287, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 288, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 288, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 296, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 296, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 305, "usage_type": "call"}, {"api_name": "torch.nn.Conv2d", "line_number": 307, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 307, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 308, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 308, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 286, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 288, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 288, "usage_type": "name"}, {"api_name": "torch.norm", "line_number": 342, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 345, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 349, "usage_type": "call"}, {"api_name": "torch.nn.Conv2d", "line_number": 351, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 351, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 352, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 352, "usage_type": "name"}, {"api_name": "torch.argsort", "line_number": 365, "usage_type": "call"}, {"api_name": "torch.index_select", "line_number": 368, "usage_type": "call"}, {"api_name": "torch.index_select", "line_number": 373, "usage_type": "call"}, {"api_name": "torch.index_select", "line_number": 379, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 347, "usage_type": "call"}, {"api_name": "time.time", "line_number": 392, "usage_type": "call"}, {"api_name": "time.time", "line_number": 395, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 385, "usage_type": "call"}]} +{"seq_id": "27442151724", "text": "#!/usr/bin/env python\n#_*_ conding: utf-8\n\nimport requests\nimport argparse\nimport json\n\ndef geolocator(args):\n res = requests.get(\"https://ipinfo.io/{}/json\".format(args.address))\n ip_info = json.loads(res.text)\n for key, value in ip_info.items():\n print(\"{}: {}\".format(key, value))\n\ndef main():\n parser = 
argparse.ArgumentParser()\n parser.add_argument('-a', '--address',help=\"IP address option\", dest='address',required=True)\n args = parser.parse_args()\n\n geolocator(args)\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n exit()", "repo_name": "VRodrigo/pycyber_scripts", "sub_path": "dnsinfo/geolocation.py", "file_name": "geolocation.py", "file_ext": "py", "file_size_in_byte": 599, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "42", "api": [{"api_name": "requests.get", "line_number": 9, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 10, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "73314665085", "text": "import os\nimport boto3 # type: ignore\nimport json\n\n__api_keys_s3_bucket = os.getenv(\"API_KEYS_S3_BUCKET\")\n__api_keys_s3_key = os.getenv(\"API_KEYS_S3_KEY\")\nif __api_keys_s3_bucket is None or __api_keys_s3_key is None:\n ASANA_API_KEY = os.getenv(\"ASANA_API_KEY\", \"\")\n GITHUB_API_KEY = os.getenv(\"GITHUB_API_KEY\", \"\")\n GITHUB_HMAC_SECRET = os.getenv(\"GITHUB_HMAC_SECRET\", \"\")\nelse:\n s3 = boto3.client(\"s3\")\n obj = s3.get_object(Bucket=__api_keys_s3_bucket, Key=__api_keys_s3_key)\n keys = json.loads(obj[\"Body\"].read())\n ASANA_API_KEY = keys.get(\"ASANA_API_KEY\", \"\")\n GITHUB_API_KEY = keys.get(\"GITHUB_API_KEY\", \"\")\n GITHUB_HMAC_SECRET = keys.get(\"GITHUB_HMAC_SECRET\", \"\")\n\nENV = os.getenv(\"ENV\", \"dev\")\nLOCK_TABLE = os.getenv(\"LOCK_TABLE\", \"sgtm-lock\")\nOBJECTS_TABLE = os.getenv(\"OBJECTS_TABLE\", \"sgtm-objects\")\nUSERS_TABLE = os.getenv(\"USERS_TABLE\", \"sgtm-users\")\nASANA_USERS_PROJECT_ID = os.getenv(\"ASANA_USERS_PROJECT_ID\", \"\")\n\n\n# Feature flags\ndef is_feature_flag_enabled(flag_name: str) -> bool:\n return os.getenv(flag_name) == \"true\"\n\n\nSGTM_FEATURE__AUTOCOMPLETE_ENABLED = is_feature_flag_enabled(\n \"SGTM_FEATURE__AUTOCOMPLETE_ENABLED\"\n)\nSGTM_FEATURE__AUTOMERGE_ENABLED = is_feature_flag_enabled(\n \"SGTM_FEATURE__AUTOMERGE_ENABLED\"\n)\nSGTM_FEATURE__DISABLE_GITHUB_TEAM_SUBSCRIPTION = is_feature_flag_enabled(\n \"SGTM_FEATURE__DISABLE_GITHUB_TEAM_SUBSCRIPTION\"\n)\nSGTM_FEATURE__FOLLOWUP_REVIEW_GITHUB_USERS = {\n github_username\n for github_username in os.getenv(\n \"SGTM_FEATURE__FOLLOWUP_REVIEW_GITHUB_USERS\", \"\"\n ).split(\",\")\n if github_username\n}\nSGTM_FEATURE__CHECK_RERUN_THRESHOLD_HOURS = int(\n os.getenv(\"SGTM_FEATURE__CHECK_RERUN_THRESHOLD_HOURS\", \"0\")\n)\nSGTM_FEATURE__CHECK_RERUN_BASE_REF_NAMES = {\n base_ref\n for base_ref in os.getenv(\n \"SGTM_FEATURE__CHECK_RERUN_BASE_REF_NAMES\", \"main,master\"\n ).split(\",\")\n if base_ref\n}\n", "repo_name": "Asana/SGTM", "sub_path": "src/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 1912, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 12, "dataset": "github-code", "pt": "42", "api": [{"api_name": "os.getenv", "line_number": 5, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 6, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 8, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 9, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 10, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 12, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 14, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 19, "usage_type": "call"}, 
{"api_name": "os.getenv", "line_number": 20, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 21, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 22, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 23, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 28, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 42, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 48, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "26033465850", "text": "'''\nStreamlit main\n\n'''\n\n# Imports\nimport streamlit as st\nimport function_streamlit as ft\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\n\n# Page settings\nft.config_page()\n\n# Cache\nst.cache(suppress_st_warning=True)\n\n# Sidebar - Menu\nmenu = st.sidebar.selectbox('Menu', \n ['Home', 'Validated Predictions', 'Future Forecast', 'Conclussions'])\n\nif menu == 'Home':\n ft.home()\nelif menu == 'Validated Predictions':\n ft.val_predictions()\nelif menu == 'Future Forecast':\n ft.future()\nelif menu == 'Conclussions':\n ft.conclussions()\n", "repo_name": "aJimenezAdalia/stocks_prediction_prophet", "sub_path": "src/main_streamlit.py", "file_name": "main_streamlit.py", "file_ext": "py", "file_size_in_byte": 573, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "42", "api": [{"api_name": "warnings.simplefilter", "line_number": 10, "usage_type": "call"}, {"api_name": "function_streamlit.config_page", "line_number": 14, "usage_type": "call"}, {"api_name": "streamlit.cache", "line_number": 17, "usage_type": "call"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 20, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 20, "usage_type": "attribute"}, {"api_name": "function_streamlit.home", "line_number": 24, "usage_type": "call"}, {"api_name": "function_streamlit.val_predictions", "line_number": 26, "usage_type": "call"}, {"api_name": "function_streamlit.future", "line_number": 28, "usage_type": "call"}, {"api_name": "function_streamlit.conclussions", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "70528915616", "text": "import pathlib\n\n\nclass PathManagerError(Exception):\n \"\"\"\n Custom exception class for 'PathManager'.\n \"\"\"\n pass\n\n\nclass PathManager:\n \"\"\"\n Path manager class to store all paths to the files/directories in more convenient way.\n \"\"\"\n\n PROJECT_ROOT: pathlib.Path\n DATA_DIR: pathlib.Path\n\n TRUSTED_USERS_PATH: pathlib.Path\n ADMIN_USERS_PATH: pathlib.Path\n BOT_DATA_DB_PATH: pathlib.Path\n DOT_ENV_PATH: pathlib.Path\n\n LOGS_DIR: pathlib.Path\n OSU_API_LOGS_PATH: pathlib.Path\n TEMP_DIR: pathlib.Path\n\n @classmethod\n def set_project_root(cls, project_root: pathlib.Path):\n \"\"\"\n Sets project root and creates path for all the project files/directories.\n \"\"\"\n cls.PROJECT_ROOT = project_root\n cls.DATA_DIR = cls.PROJECT_ROOT / \"data\"\n\n cls.TRUSTED_USERS_PATH = cls.PROJECT_ROOT / \"data\" / \"trusted_users.json\"\n cls.ADMIN_USERS_PATH = cls.PROJECT_ROOT / \"data\" / \"admins.json\"\n cls.BOT_DATA_DB_PATH = cls.PROJECT_ROOT / \"data\" / \"bot_data.db\"\n cls.DOT_ENV_PATH = cls.PROJECT_ROOT / \".env\"\n\n cls.LOGS_DIR = cls.PROJECT_ROOT / \"logs\"\n cls.OSU_API_LOGS_PATH = cls.PROJECT_ROOT / \"logs\" / \"osu_api\"\n cls.TEMP_DIR = cls.PROJECT_ROOT / \"data\" / \"temp\"\n\n @classmethod\n def check_paths_existence(cls) -> None:\n \"\"\"\n Checks 
if all required files and directories exist.\n Raises 'PathManagerError' if any path is missing.\n \"\"\"\n paths_to_check = [\n cls.DATA_DIR,\n cls.TRUSTED_USERS_PATH,\n cls.ADMIN_USERS_PATH,\n cls.BOT_DATA_DB_PATH,\n cls.DOT_ENV_PATH,\n cls.LOGS_DIR,\n cls.OSU_API_LOGS_PATH,\n cls.TEMP_DIR\n ]\n\n # Check if all paths exist\n missing_paths = [path for path in paths_to_check if not path.exists()]\n\n if missing_paths:\n raise PathManagerError(f\"Missing paths: {missing_paths}\")\n", "repo_name": "ZyMa-1/ZyMaaDiscordBot", "sub_path": "src/core/PathManager.py", "file_name": "PathManager.py", "file_ext": "py", "file_size_in_byte": 1971, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "33", "api": [{"api_name": "pathlib.Path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 29, "usage_type": "attribute"}]} +{"seq_id": "41617106602", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm\nimport matplotlib.gridspec as gridspec\nimport scipy.linalg\nfrom tqdm import tqdm # progress bar\nspreading = __import__('2Dspreading')\n\n\ndef SDevSqRad(H,X,Y,stepsTot):\n\n SDev = np.zeros(stepsTot)\n xPosns = np.zeros(X*Y)\n yPosns = np.zeros(X*Y)\n RadialDist = np.zeros(X*Y)\n middleX = int(X/2)\n middleY = int(Y/2)\n for i in range(X*Y):\n xPosns[i] = int(i/Y)\n yPosns[i] = i%Y\n RadialDist[i] = np.sqrt((abs(xPosns[i]-middleX)**2)+(abs(yPosns[i]-middleY)**2))\n\n for step in (range(stepsTot)):\n U = scipy.linalg.expm(-1j*H*step)\n psi0 = np.zeros(X*Y)\n if X%2 != 0:\n psi0[int((X*Y)/2)] = 1 # initialise in the middle\n else:\n psi0[int((X*Y)/2-Y/2)] = 1 # initialise in the middle\n psiN = np.dot(U,psi0)\n\n probs = abs(psiN**2)\n\n AvgX = sum(RadialDist*probs)\n AvgXsq = sum((RadialDist**2)*probs)\n\n StandDev = np.sqrt(np.around((AvgXsq - (AvgX)**2),decimals=10)) # if not rounded the first value in the sqrt is negative (order e-16)\n SDev[step] = StandDev\n\n return SDev, probs\n\n\n\nif __name__ == \"__main__\":\n\n X = 10 # x size of side of lattice, keep higher than Y (horizontal tube)\n Y = 10 # y size of lattice\n Ylabel = Y\n stepsTot = 20\n trialsTot = 6 # decrease width of tube by one square per trial\n\n SDevRadAll = []\n\n for t in tqdm(range(trialsTot)):\n H = spreading.SquareTube(X,Y,structure='lattice')\n \n SDevRad, probsRad = SDevSqRad(H,X,Y,stepsTot)\n if t == 0:\n PlotProbsRad = np.zeros((Y,X))\n for i in range(X*Y):\n PlotProbsRad[i%Y][int(i/Y)] = probsRad[i]\n\n SDevRadAll.append(SDevRad)\n\n Y -= 1\n\n\n # plot\n\n fig = plt.figure(figsize=(14,4), dpi=200)\n # gs1 = gridspec.GridSpec(6, 5)\n # gs1.update(hspace=0.3)\n\n\n # ax2 = fig.add_subplot(gs1[:4,1:4])\n # col2 = ax2.pcolor(PlotProbsRad)\n # cbar2 = fig.colorbar(col2, label='probability')\n\n\n # gs2 = gridspec.GridSpec(6, 5)\n # gs2.update(hspace=0.3)\n\n # ax5 = 
fig.add_subplot(gs2[4:,:])\n ax5 = fig.add_subplot(111)\n for j in range(trialsTot):\n plt.plot(np.arange(stepsTot),SDevRadAll[j],label=('width: '+str(Ylabel-j)))\n plt.xlabel('steps', fontsize=18)\n plt.ylabel('$\sigma_r$', fontsize=18)\n plt.legend(loc='upper left', bbox_to_anchor=(1, 1), fontsize=18)\n ax5.tick_params(labelsize=18)\n plt.subplots_adjust(right=0.9)\n\n\n plt.show()", "repo_name": "michelevalotti/Quantum_Dynamics_on_Graphs", "sub_path": "Squares/RadialSpread.py", "file_name": "RadialSpread.py", "file_ext": "py", "file_size_in_byte": 2564, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "42", "api": [{"api_name": "numpy.zeros", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 21, "usage_type": "call"}, {"api_name": "scipy.linalg.linalg.expm", "line_number": 24, "usage_type": "call"}, {"api_name": "scipy.linalg.linalg", "line_number": 24, "usage_type": "attribute"}, {"api_name": "scipy.linalg", "line_number": 24, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.around", "line_number": 37, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots_adjust", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}]}
+{"seq_id": "8663616682", "text": "import json\nfrom dataclasses import dataclass\nfrom random import uniform\nfrom typing import List\n\nimport marshmallow\nimport marshmallow_dataclass\n\n\n@dataclass\nclass Armor:\n id: int\n name: str\n defence: float\n stamina_per_turn: float\n\n\n@dataclass\nclass Weapon:\n id: int\n name: str\n min_damage: float\n max_damage: float\n stamina_per_hit: float\n\n @property\n def damage(self):\n '''\n Pick the weapon's damage at random within the min-max damage range.\n '''\n return round(uniform(self.min_damage, self.max_damage), 1)\n\n\n@dataclass\nclass EquipmentData:\n weapons: List[Weapon]\n armors: List[Armor]
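\n\n\n# Illustrative usage sketch (assumes data/equipment.json matches the EquipmentData schema):\n# equipment = Equipment()\n# weapon = equipment.get_weapon(equipment.get_weapons_names()[0])\n# print(weapon.name, weapon.damage)\nclass 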
Equipment:\n\n def __init__(self):\n self.equipment = self._get_equipment_data()\n\n def get_weapon(self, weapon_name) -> Weapon:\n \"\"\"\n Arm the character with the named weapon.\n \"\"\"\n return next(filter(lambda w: w.name == weapon_name, self.equipment.weapons))\n\n def get_armor(self, armor_name) -> Armor:\n \"\"\"\n Equip the character with the named armor.\n \"\"\"\n return next(filter(lambda a: a.name == armor_name, self.equipment.armors))\n\n def get_weapons_names(self) -> list:\n \"\"\"\n List of available weapons.\n \"\"\"\n return [weapon.name for weapon in self.equipment.weapons]\n\n def get_armors_names(self) -> list:\n \"\"\"\n List of available armor.\n \"\"\"\n return [armor.name for armor in self.equipment.armors]\n\n @staticmethod\n def _get_equipment_data() -> EquipmentData:\n with open(\"./data/equipment.json\", 'r', encoding='utf-8') as equipment_file:\n data = json.load(equipment_file)\n equipment_schema = marshmallow_dataclass.class_schema(EquipmentData)\n try:\n return equipment_schema().load(data)\n except marshmallow.exceptions.ValidationError:\n raise ValueError\n", "repo_name": "saros1249/CW5", "sub_path": "equipment.py", "file_name": "equipment.py", "file_ext": "py", "file_size_in_byte": 2018, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "42", "api": [{"api_name": "dataclasses.dataclass", "line_number": 10, "usage_type": "name"}, {"api_name": "random.uniform", "line_number": 31, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 18, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 36, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 37, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 34, "usage_type": "name"}, {"api_name": "json.load", "line_number": 72, "usage_type": "call"}, {"api_name": "marshmallow_dataclass.class_schema", "line_number": 73, "usage_type": "call"}, {"api_name": "marshmallow.exceptions", "line_number": 76, "usage_type": "attribute"}]}
+{"seq_id": "11396217982", "text": "'''\nCreated on Jan 30, 2015\n\n@author: jeburks\n@author: Kris Stanton\n'''\nimport CHIRPS.utils.geo.geoutils as geoutils\nimport CHIRPS.utils.processtools.dateprocessor as dproc\nimport CHIRPS.utils.configuration.parameters as params\nimport CHIRPS.utils.file.npmemmapstorage as rp\nimport CHIRPS.utils.geo.clippedmaskgenerator as mg\nimport CHIRPS.utils.file.dateutils as dateutils\nimport CHIRPS.utils.db.bddbprocessing as bdp\nimport sys\nimport CHIRPS.utils.locallog.locallogging as llog\nimport zmq\nimport json\nimport CHIRPS.utils.processtools.uutools as uu\nimport CHIRPS.utils.file.MaskTempStorage as mst\nimport CHIRPS.utils.geo.shapefile.readShapesfromFiles as sf\nimport CHIRPS.utils.processtools.pMathOperations as pMath\nimport time\nfrom copy import deepcopy\nfrom operator import itemgetter\nimport CHIRPS.utils.RequestLog.requestLog as reqLog\nimport CHIRPS.utils.file.ExtractTifFromH5 as extractTif\nimport CHIRPS.utils.processtools.AnalysisTools as analysisTools\n\nclass ZMQCHIRPSHeadProcessor():\n \n logger = llog.getNamedLogger(\"request_processor\")\n workToBeDone = {}\n workDone = []\n name = None\n current_work_dict = None\n finished_items = []\n total_task_count = 0\n finished_task_count = 0\n request = None\n inputreceiver = None\n outputreceiver = None\n listeningreceiver = None\n inputconn = None\n outputconn = None\n listeningconn = None\n progress = 0\n process_start_time = 0\n mathop = None\n \n
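# Architecture note: this head process PULLs incoming requests from inputconn,\n # PUSHes individual work items to workers through outputconn, and PULLs the\n # per-item results back on listeningconn (a ZMQ push/pull pipeline).\n # KS Refactor 2015 # Some items related to 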
download data jobs\n isDownloadJob = False\n dj_OperationName = \"download\" # Needed by # \"if results['value'][opname] != missingValue:\"\n \n def __init__(self, name, inputconn, outputconn, listenconn):\n \n self.name = name\n self.inputconn = inputconn\n self.outputconn = outputconn\n self.listeningconn = listenconn\n self.logger.info(\"Creating Processor named: \"+self.name+\" listening on port: \"+self.inputconn+\" outputting to port: \"+self.outputconn+\" listening for output on: \"+self.listeningconn)\n \n ##Connect to the source\n self.__beginWatching__()\n \n def __beginWatching__(self):\n \n context = zmq.Context()\n \n self.inputreceiver = context.socket(zmq.PULL)\n self.inputreceiver.connect(self.inputconn)\n self.outputreceiver = context.socket(zmq.PUSH)\n self.outputreceiver.connect(self.outputconn)\n self.listenreceiver = context.socket(zmq.PULL)\n self.listenreceiver.connect(self.listeningconn)\n self.logger.info(\"Processor (\"+self.name+\") Connected and Ready\")\n self.__watchAgain__()\n \n def __watchAgain__(self):\n while(True):\n self.logger.info(\"HeadProcessor (\"+self.name+\"): Waiting for input\")\n request = json.loads(self.inputreceiver.recv())\n self.process_start_time = time.time()\n self.logger.info(\"Processing request \"+request['uniqueid'])\n self.processWork(request)\n time_total = time.time()-self.process_start_time\n self.logger.info(\"Total time: \"+str(time_total))\n \n # For download dataset types..\n def preProcessWork_ForDownloadTypes(self, request):\n if (self.isDownloadJob == True ):\n if (self.dj_OperationName == \"download\"):\n theJobID = None\n try:\n self.logger.info(\"(\"+self.name+\"):preProcessWork_ForDownloadTypes: Pre_Processing a Download Data Job. \" + str(request['uniqueid']))\n theJobID = request['uniqueid']\n outFileFolder = params.zipFile_ScratchWorkspace_Path + str(theJobID)+\"/\" \n extractTif.create_Scratch_Folder(outFileFolder)\n except:\n pass\n elif (self.dj_OperationName == \"download_all_climate_datasets\"):\n # Placeholder for download_all_climate_datasets operations.... not even sure if going to use this here..\n pass\n else:\n # This is a statistical do nothing\n return\n \n # After all the tif extracting is done, need to zip them all up in a single operation\n def postProcessWork_ForDownloadTypes(self, request):\n if (self.isDownloadJob == True ):\n if (self.dj_OperationName == \"download\"):\n theJobID = None\n try:\n self.logger.info(\"(\"+self.name+\"):postProcessWork_ForDownloadTypes: Post_Processing a Download Data Job. \" + str(request['uniqueid']))\n theJobID = request['uniqueid']\n \n # Zip the files\n zipFilePath, errorMessage = extractTif.zip_Extracted_Tif_Files_Controller(theJobID)\n if (errorMessage == None):\n self.logger.info(\"(\"+self.name+\"):postProcessWork_ForDownloadTypes: Tif files have been zipped to: \" + str(zipFilePath))\n else:\n self.logger.info(\"(\"+self.name+\"):postProcessWork_ForDownloadTypes: ERROR ZIPPING TIF FILES. errorMessage: \" + str(errorMessage))\n \n except:\n pass\n elif (self.dj_OperationName == \"download_all_climate_datasets\"):\n # Placeholder for download_all_climate_datasets operations.... 
not even sure if going to use this here..\n pass\n else:\n # This is a statistical do nothing\n return\n \n # Placeholder\n \n pass\n \n \n def processWork(self,request):\n #self.logger.info(\"Process Work\"+str(request))\n #self.logger.info(\"(\"+self.name+\"):processWork: Process Work: \"+str(request))\n ###Break into Chunks\n self.request = request\n \n # ks notes // Generate a list of work to be done (each item represents a time interval)\n error, workarray = self.__preProcessIncomingRequest__(request)\n \n # KS Refactor 2015 // Additional pre-setup items specific to download request types\n self.preProcessWork_ForDownloadTypes(request)\n \n # ks notes // Dispatch that list of work through the output receiver (to be picked up by workers)\n if (error == None):\n #self.logger.info(\"(\" + self.name + \"):processWork: error == None passed. \")\n\n #self.logger.info(workarray) # WAAY TOO MUCH OUTPUT...\n\n #self.total_task_count = len(workarray)\n self.worklist_length = len(workarray)\n self.total_task_count = len(workarray)\n # try:\n # if (workarray[0]['derived_product'] == True):\n # self.total_task_count = len(workarray) - 1\n # except:\n # pass\n\n self.__updateProgress__()\n\n self.logger.info(\"(\" + self.name + \"):processWork: About to do 'for item in workarray' \")\n workingArray_guid_index_list = []\n for item in workarray:\n self.workToBeDone[item['workid']] = item\n workingArray_guid_index_list.append(item['workid'])\n workingArray = deepcopy(self.workToBeDone)\n\n self.logger.info(\"(\" + self.name + \"):processWork: About to call __watchForResults_and_keepSending__ \")\n #self.logger.info(\"(\" + self.name + \"):processWork: DEBUG: workingArray \" + str(workingArray))\n self.__watchForResults_and_keepSending__(workingArray, workingArray_guid_index_list)\n\n # ks notes // Not sure why deepcopy here, but a copy is made of the work list here and that copy is sent (as json string)\n\n # Original Code, Send ALL the work items to the queue and THEN start listening for processed items.\n # The problem we are running into is that too many items are sent out before any can be processed back in.\n # Maybe it is a memory issue?\n # self.logger.info(\"(\" + self.name + \"):processWork: About to do 'for item in workingArray' (len(workingArray)): \" + str(len(workingArray)))\n # item_counter = 0\n # for item in workingArray:\n # self.outputreceiver.send_string(json.dumps(workingArray[item]))\n # self.logger.info(\"(\" + self.name + \"):processWork: outputreceiver.send for item \" + str(item_counter) + \" of \" + str(len(workingArray)))\n # item_counter = item_counter + 1\n\n # Originally, this was at the end\n #self.logger.info(\"(\" + self.name + \"):processWork: About to call __watchForResults__ \")\n #self.__watchForResults__()\n else:\n self.logger.warn(\"Got an error processing request: \"+str(error))\n \n # ks refactor 2015 - Write Error to log, (also try and get the job id from the request)\n theJobID = \"\"\n try:\n theJobID = request['uniqueid']\n except:\n theJobID = \"\"\n self.__write_JobError_To_DB__(theJobID, str(error), str(request))\n \n self.progress = -1\n self.__cleanup__()\n self.__processProgress__(self.progress)\n self.__watchAgain__()\n \n self.logger.info(\"(\"+self.name+\"):processWork: Process Work has reached the end!\")\n\n\n # Use this when the size of the worklist is too large and we need to keep sending as some come in.\n def __watchForResults_and_keepSending__(self, workingArray, workingArray_guid_index_list):\n # Send the first 1000 items.\n # As new items 
come in for processing, send another item out.\n\n # Break the workingArray into chunks of this size (so only this many get sent at a time).\n message_chunkSize = 5000\n\n # If the working array is already smaller than the max chunk size, use the original method\n if(len(workingArray) < message_chunkSize):\n # Send all the messages, and then call the original method, then return so none of the below stuff happens.\n self.logger.info(\"(\" + self.name + \"):__watchForResults_and_keepSending__: About to do 'for item in workingArray' (len(workingArray)): \" + str(len(workingArray)))\n item_counter = 0\n for item in workingArray:\n self.outputreceiver.send_string(json.dumps(workingArray[item]))\n self.logger.info(\"(\" + self.name + \"):__watchForResults_and_keepSending__: outputreceiver.send for item \" + str(item_counter) + \" of \" + str(len(workingArray)))\n item_counter = item_counter + 1\n\n self.logger.info(\"(\" + self.name + \"):__watchForResults_and_keepSending__: About to call __watchForResults__ \")\n self.__watchForResults__()\n return\n\n # How many progress thresholds we should be checking.\n number_of_progress_thresholds = (len(workingArray) / message_chunkSize) + 1\n current_workingArray_index = 0\n finished_sending_workingArray_data = False\n\n # Debug reporting\n self.logger.info(\"(\" + self.name + \"):__watchForResults_and_keepSending__: message_chunkSize : \" + str(message_chunkSize))\n self.logger.info(\"(\" + self.name + \"):__watchForResults_and_keepSending__: len(workingArray) : \" + str(len(workingArray)))\n self.logger.info(\"(\" + self.name + \"):__watchForResults_and_keepSending__: number_of_progress_thresholds : \" + str(number_of_progress_thresholds))\n\n\n # Send in the first chunk.\n for i in range(0, message_chunkSize):\n if (current_workingArray_index >= len(workingArray)):\n # Don't try to send this index as it does not exist; just set the 'done' flag to true.\n finished_sending_workingArray_data = True\n else:\n current_workid_index = workingArray_guid_index_list[current_workingArray_index]\n self.outputreceiver.send_string(json.dumps(workingArray[current_workid_index])) # (workingArray[current_workingArray_index]))\n self.logger.info(\"(\" + self.name + \"):__watchForResults_and_keepSending__: outputreceiver.send for item \" + str(current_workingArray_index) + \" of \" + str(len(workingArray)))\n current_workingArray_index = current_workingArray_index + 1\n\n #last_current_progress\n last_chunk_sent = 1\n next_progress_threshold_to_check = ((100.0 / number_of_progress_thresholds) * last_chunk_sent)\n\n self.logger.info(\"(\" + self.name + \"):__watchForResults_and_keepSending__: ENTERING THE WHILE LOOP OF CHECKING PROGRESS.\")\n self.logger.info(\"(\" + self.name + \"):__watchForResults_and_keepSending__: last_chunk_sent: \" + str(last_chunk_sent))\n self.logger.info(\"(\" + self.name + \"):__watchForResults_and_keepSending__: next_progress_threshold_to_check: \" + str(next_progress_threshold_to_check))\n\n # Start listening, and sending in future chunks.\n while (self.progress < 100):\n # Normal receiving operation.\n results = json.loads(self.listenreceiver.recv())\n self.processFinishedData(results)\n self.logger.info(\"(\"+self.name+\"):__watchForResults_and_keepSending__: self.progress: \" + str(self.progress))\n\n # Send more stuff down the queue..\n if(finished_sending_workingArray_data == True):\n # Done sending worklist items, do nothing\n pass\n else:
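\n # Flow-control note: another chunk of work items is dispatched each time overall\n # progress crosses the next (100 / number_of_progress_thresholds) percent threshold,\n # so the queue is fed gradually instead of all at once.\n # We are not done sending items... 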
check to see if the last_current_progress changed..\n #if(last_current_progress != self.progress):\n\n if (self.progress > next_progress_threshold_to_check):\n # progress has changed...\n # Set the next progress to check\n #last_current_progress = self.progress\n last_chunk_sent = last_chunk_sent + 1\n next_progress_threshold_to_check = ((100.0 / number_of_progress_thresholds) * last_chunk_sent)\n\n self.logger.info(\"(\" + self.name + \"):__watchForResults_and_keepSending__: PROGRESS THRESHOLD HIT: Changing the compare for the next time\" )\n self.logger.info(\"(\" + self.name + \"):__watchForResults_and_keepSending__: last_chunk_sent: \" + str(last_chunk_sent))\n self.logger.info(\"(\" + self.name + \"):__watchForResults_and_keepSending__: next_progress_threshold_to_check: \" + str(next_progress_threshold_to_check))\n self.logger.info(\"(\" + self.name + \"):__watchForResults_and_keepSending__: About to send in another chunk..\")\n\n # Send more workingArray items to be processed.\n for i in range(0, message_chunkSize):\n if (current_workingArray_index >= len(workingArray)):\n # Don't try and send this index as it does not exist!!!, just set the 'done' flag to true.\n finished_sending_workingArray_data = True\n else:\n current_workid_index = workingArray_guid_index_list[current_workingArray_index]\n self.outputreceiver.send_string(json.dumps(workingArray[current_workid_index])) #(workingArray[current_workingArray_index]))\n self.logger.info(\n \"(\" + self.name + \"):__watchForResults_and_keepSending__: outputreceiver.send for item \" + str(\n current_workingArray_index) + \" of \" + str(len(workingArray)))\n current_workingArray_index = current_workingArray_index + 1\n\n self.__finishJob__()\n\n\n # Original 'watchForResults' function\n # This is the part of the code that listens for workers to be done with their processing.\n # Once finished, it fires off the __finishJob__ method which completes the job.\n def __watchForResults__(self):\n # Normal existing code pipeline.\n while (self.progress < 100):\n results = json.loads(self.listenreceiver.recv())\n self.processFinishedData(results)\n self.logger.info(\"(\"+self.name+\"):__watchForResults__: self.progress: \" + str(self.progress))\n self.__finishJob__()\n # if (self.derived_product == True):\n # self.sub_types_finished = False\n # while (self.sub_types_finished == False):\n # results = json.loads(self.listenreceiver.recv())\n # self.processFinishedData(results)\n # if(self.finished_task_count == )\n # self.__finishJob__()\n # else:\n # # Normal existing code pipeline.\n # while (self.progress < 100):\n # results = json.loads(self.listenreceiver.recv())\n # self.processFinishedData(results)\n # self.__finishJob__()\n \n \n \n def processFinishedData(self, results):\n self.logger.info(\"(\"+self.name+\"):processFinishedData: Process Finished Work \"+str(self.request))\n self.logger.info(\"(\" + self.name + \"):processFinishedData: results: \" + str(results))\n
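\n # Note: every result message advances the progress counter, but results whose\n # value equals the dataset's fill value are excluded from finished_items below.\n 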
#self.logger.info(\"Process Finished Work \"+str(self.request))\n #Need to process the data\n self.finished_task_count = self.finished_task_count +1\n \n self.workToBeDone.pop(results['workid'],None)\n\n missingValue = None\n if (self.derived_product == True):\n current_data_type = results['datatype']\n missingValue = params.dataTypes[current_data_type]['fillValue'] # Now it's dynamic (passed in from the worker)\n else:\n missingValue = params.dataTypes[self.request['datatype']]['fillValue']\n\n\n # Original Line, before the MonthlyRainfallAnalysis type was setup.\n # missingValue = params.dataTypes[self.request['datatype']]['fillValue']\n\n #self.logger.info(\"Request:\"+str(self.request))\n #self.logger.info(\"Results:\"+str(results))\n \n \n # Another override\n #self.logger.info(\"HeadProcessor:processFinishedData:DEBUG: str(results) : \"+str(results))\n opname = \"\"\n if (self.isDownloadJob == True):\n # For Download Jobs.\n opname = self.dj_OperationName #self.mathop.getName()\n \n # Need to figure out why we use 'self.finished_items' and what happens if I just skip it..\n if results['value'] != missingValue:\n self.finished_items.append(results)\n \n #self.__updateProgress__()\n else:\n\n if (self.derived_product == True):\n # There is an issue here, the opname can be dynamic.. the system can't handle that the way it's built now without a lot more pipes being installed and possibly making things inefficient to the point where it just doesn't work right (without a good bit of thinking and planning anyways..)\n # # at least for this MonthlyRainfallAnalysis type of derived product, the opname will ALWAYS be average (or avg)\n opname = \"avg\" #\"MonthlyAnalysis\" # self.derived_opname = \"Unset\"\n else:\n # For Normal Types of requests. (where there is only one type of operation.)\n # For math operator type stats functions.\n opname = self.mathop.getName()\n\n # # KS Refactor 2017 - May - Pre-existing code (Before Derived Product types existed)\n # # For math operator type stats functions.\n # opname = self.mathop.getName()\n\n if results['value'][opname] != missingValue:\n self.finished_items.append(results)\n #self.__updateProgress__()\n \n self.__updateProgress__()\n \n #self.logger.info(\"Opname\"+opname)\n \n # This part of the code checks the value that the current operation returned, if it is different than the missing value, it is counted as a finished item.\n # This only applies to non-download jobs.. 
so here is the conditional.\n \n # Commenting old code (this was moved into the conditional above when 'download' jobs were added.)\n #if results['value'][opname] != missingValue:\n # #self.logger.info(\"Adding data\")\n # self.finished_items.append(results)\n #self.__updateProgress__()\n ##self.logger.info(\"Progress :\"+str(self.progress))\n \n def __sortData__(self,array):\n newlist = sorted(array, key=itemgetter('epochTime'))\n return newlist \n \n \n def __updateProgress__(self,output_full=False):\n self.progress = (float(self.finished_task_count)/float(self.total_task_count))*100.\n if (self.progress < 100 or output_full == True):\n self.__processProgress__(self.progress)\n\n # When this function is called, we KNOW that all the worklist items have been completed\n # We can find the values for every job inside the variable called,\n # # self.finished_items = []\n def __finishJob__(self):\n\n # KS Refactor 2015 // Pipe the request into the postprocess for download pipeline\n self.postProcessWork_ForDownloadTypes(self.request)\n \n #self.logger.info(\"Finished Job:\"+str(self.request))\n self.logger.info(\"(\"+self.name+\"):__finishJob__:Finished Job:\"+str(self.request))\n \n # KS Refactor 2015 - Logging Job Finished\n theJobID = \"\"\n try:\n theJobID = str(self.request['uniqueid'])\n except:\n theJobID = \"\"\n self.__write_JobCompleted_To_DB__(theJobID, str(self.request))\n \n self.finished_items = self.__sortData__(self.finished_items)\n# ##Output Data\n if (self.derived_product == True):\n # Special output formatting for Monthly Analysis (we don't necessarily want all the raw data (maybe we do!?)\n self.__outputDataForMonthlyAnalysis__()\n else:\n # Normal output formatting\n self.__outputData__()\n# ##Update Progress \n self.__updateProgress__(output_full=True)\n self.__cleanup__()\n# ###Back to looking for work.\n\n def __cleanup__(self):\n # self.logger.info(\"Cleanup\")\n self.total_task_count = 0;\n self.worklist_length = 0;\n self.finished_task_count = 0;\n self.current_work_dict = None\n self.finished_items = []\n # Extra stuff for derived product types.\n self.derived_product = False\n self.sub_types_finished = True # When this is False, the function that watches for finished worker progress keeps running\n self.derived_opname = \"Unset\"\n\n\n def __writeResults__(self,uniqueid,results):\n filename = params.getResultsFilename(uniqueid)\n f = open(filename, 'w')\n json.dump(results,f)\n f.close()\n f = None\n \n def __insertProgressDb__(self,uniqueid):\n conn = bdp.BDDbConnector()\n conn.setProgress(uniqueid, 0)\n conn.close()\n \n def __updateProgressDb__(self,uniqueid, progress):\n conn = bdp.BDDbConnector()\n conn.setProgress(uniqueid, progress)\n conn.close()\n \n # KS Refactor 2015 - Adding ServerSide Job Log to request logs area - Log when Jobs are started.\n def __write_JobStarted_To_DB__(self,uniqueid, objectInfo):\n try:\n theID = uniqueid\n theStatusNote = \"JobStarted\"\n theAdditionalNotes = \"Server Job: \" + str(theID) + \" has started :: Object Info: \" + str(objectInfo)\n rLog = reqLog.requestLog()\n rLog.logger = self.logger\n rLog.add_New_ServerSide_Request(theID, theStatusNote, theAdditionalNotes)\n except:\n pass\n \n \n # KS Refactor 2015 - Adding ServerSide Job Log to request logs area - Log when Jobs are completed\n def __write_JobError_To_DB__(self,uniqueid,errorMessage, objectInfo):\n try:\n theID = uniqueid\n theStatusNote = \"JobError\"\n theAdditionalNotes = \"Server Job: \" + str(theID) + \" had an Error. 
Error Message: \" + str(errorMessage) + \" :: Object Info: \" + str(objectInfo)\n rLog = reqLog.requestLog()\n rLog.logger = self.logger\n rLog.add_New_ServerSide_Request(theID, theStatusNote, theAdditionalNotes)\n except:\n pass\n \n \n # KS Refactor 2015 - Adding ServerSide Job Log to request logs area - Log when Jobs are completed\n def __write_JobCompleted_To_DB__(self,uniqueid, objectInfo):\n try:\n theID = uniqueid\n theStatusNote = \"JobCompleted\"\n theAdditionalNotes = \"Server Job: \" + str(theID) + \" has been completed :: Object Info: \" + str(objectInfo)\n rLog = reqLog.requestLog()\n rLog.logger = self.logger\n rLog.add_New_ServerSide_Request(theID, theStatusNote, theAdditionalNotes)\n except:\n pass\n \n def __writeMask__(self,uid,array,bounds):\n mst.writeHMaskToTempStorage(uid,array,bounds)\n def __is_custom_job_type__MonthlyGEFSRainfallAnalysis__(self, request):\n # # Inputs: From ZMQ (from the API Layer): ( A location, ( (layerid + featureids) OR ( geometry ) ), custom_job_type ( Hard coded String \"MonthlyRainfallAnalysis\" ), uniqueid )\n try:\n # uniqueid = request['uniqueid']\n # custom_job_type = request['custom_job_type']\n # # if(custom_job_type == 'MonthlyRainfallAnalysis'):\n # self.logger.info(\"(\" + self.name + \"):__preProcessIncomingRequest__: uniqueid: \" + str(\n # uniqueid) + \", custom_job_type: \" + custom_job_type)\n #\n # self.logger.info(\n # \"TODO, FINISH THE custom_job_type PART OF THIS PREPROCESS_INCOMING_REQUEST PIPELINE....remove the return statement before finishing.\")\n\n custom_job_type = request['custom_job_type']\n if (custom_job_type == \"MonthlyGEFSRainfallAnalysis\"):\n return True\n else:\n return False #None\n\n except Exception as e:\n uniqueid = request['uniqueid']\n self.logger.warn(\"(\" + self.name + \"):Couldn't find custom_job_type in '__is_custom_job_type__MonthlyRainfallAnalysis__' in HeadProcessor: uniqueid: \" + str(\n uniqueid) + \" Exception Error Message: \" + str(e))\n return e, False # REMOVE THIS RETURN PATH, POSSIBLE EXISTING BEHAVIOR SHOULD HAPPEN HERE.\n\n return False\n\t\t\n def __is_custom_job_type__MonthlyRainfallAnalysis__(self, request):\n # # Inputs: From ZMQ (from the API Layer): ( A location, ( (layerid + featureids) OR ( geometry ) ), custom_job_type ( Hard coded String \"MonthlyRainfallAnalysis\" ), uniqueid )\n try:\n # uniqueid = request['uniqueid']\n # custom_job_type = request['custom_job_type']\n # # if(custom_job_type == 'MonthlyRainfallAnalysis'):\n # self.logger.info(\"(\" + self.name + \"):__preProcessIncomingRequest__: uniqueid: \" + str(\n # uniqueid) + \", custom_job_type: \" + custom_job_type)\n #\n # self.logger.info(\n # \"TODO, FINISH THE custom_job_type PART OF THIS PREPROCESS_INCOMING_REQUEST PIPELINE....remove the return statement before finishing.\")\n\n custom_job_type = request['custom_job_type']\n if (custom_job_type == \"MonthlyRainfallAnalysis\"):\n return True\n else:\n return False #None\n\n except Exception as e:\n uniqueid = request['uniqueid']\n self.logger.warn(\"(\" + self.name + \"):Couldn't find custom_job_type in '__is_custom_job_type__MonthlyRainfallAnalysis__' in HeadProcessor: uniqueid: \" + str(\n uniqueid) + \" Exception Error Message: \" + str(e))\n return e, False # REMOVE THIS RETURN PATH, POSSIBLE EXISTING BEHAVIOR SHOULD HAPPEN HERE.\n\n return False\n\n def __preProcessIncomingRequest__(self, request):\n\n # Check for Custom Job Type Here.\n self.derived_product = False # Default\n is_job_type__MonthlyRainfallAnalysis = 
self.__is_custom_job_type__MonthlyRainfallAnalysis__(request) #False\n is_job_type__MonthlyGEFSRainfallAnalysis = self.__is_custom_job_type__MonthlyGEFSRainfallAnalysis__(request) \n if(is_job_type__MonthlyRainfallAnalysis == True):\n self.logger.info(\"(\" + self.name + \"):__preProcessIncomingRequest__: This IS a 'MonthlyRainfallAnalysis' type. \")\n\n # Set up the Monthly Rainfall Analysis type here. (Note, there are return statements on both of these paths.. this should probably be moved to a separate pipeline.\n try:\n # Monthly Rainfall Analysis Setup.\n # So far, all we get as inputs from the client are the uniqueid and a geometry.\n\n # Following along the normal_ish code\n uniqueid = request['uniqueid']\n self.derived_product = True # Signals the progress counter in a various way.\n self.__insertProgressDb__(uniqueid)\n self.__write_JobStarted_To_DB__(uniqueid, str(request))\n self.logger.info(\"(\" + self.name + \"):__preProcessIncomingRequest__: (MonthlyRainfallAnalysis_Type): uniqueid: \" + str(uniqueid))\n self.logger.info(\"(\" + self.name + \"):__preProcessIncomingRequest__: (MonthlyRainfallAnalysis_Type): uniqueid: \" + str(request))\n\n #self.mathop = pMath.mathOperations(operationtype, 1, params.dataTypes[datatype]['fillValue'], None)\n self.logger.info(\n \"(\" + self.name + \"):__preProcessIncomingRequest__: (MonthlyRainfallAnalysis_Type): Don't forget about this: self.mathop, it is used again in the finish job code. \")\n self.isDownloadJob = False\n self.dj_OperationName = \"NotDLoad\"\n self.derived_opname = \"MonthlyRainfallAnalysis\"\n\n # HERE IS WHAT WE ACTUALLY NEED TO RETURN...\n # Some processing of all the input params (logging things along the way)\n # # Geometry one is a little complex but the example below does work.\n # Then A bunch of stuff to setup a worklist\n # return None, worklist\n\n #worklist = []\n worklist = analysisTools.get_workList_for_headProcessor_for_MonthlyRainfallAnalysis_types(uniqueid, request)\n # if (params.DEBUG_LIVE == True):\n # self.logger.debug(\n # \"(\" + self.name + \"):__preProcessIncomingRequest__ : (MonthlyRainfallAnalysis_Type): worklist array value: \" + str(worklist))\n self.logger.info(\"(\" + self.name + \"):__preProcessIncomingRequest__ : (MonthlyRainfallAnalysis_Type): worklist length array value: \" + str(len(worklist)))\n # With these two lines here, everything just goes into the ether.\n\n #not_finished_yet = \"TODO! NOT FINISHED YET!\"\n #return not_finished_yet, None\n\n # Sets off the job task runners.\n return None, worklist\n\n except Exception as e:\n self.logger.warn(\"(\" + self.name + \"): MonthlyRainfallAnalysis_Type: Error processing Request in HeadProcessor: uniqueid: \" + str(\n uniqueid) + \" Exception Error Message: \" + str(e))\n return e, None\n elif (is_job_type__MonthlyGEFSRainfallAnalysis == True):\n self.logger.info(\"(\" + self.name + \"):__preProcessIncomingRequest__: This IS a 'CHIRPS-GEFS MonthlyRainfallAnalysis' type. \")\n # Set up the Monthly Rainfall Analysis type here. (Note, there are return statements on both of these paths.. 
this should probably be moved to a separate pipeline.\n try:\n # Monthly Rainfall Analysis Setup.\n # So far, all we get as inputs from the client are the uniqueid and a geometry.\n\n # Following along the normal_ish code\n uniqueid = request['uniqueid']\n self.derived_product = True # Signals the progress counter in a various way.\n self.__insertProgressDb__(uniqueid)\n self.__write_JobStarted_To_DB__(uniqueid, str(request))\n self.logger.info(\"(\" + self.name + \"):__preProcessIncomingRequest__: (MonthlyGEFSRainfallAnalysis_Type): uniqueid: \" + str(uniqueid))\n self.logger.info(\"(\" + self.name + \"):__preProcessIncomingRequest__: (MonthlyGEFSRainfallAnalysis_Type): uniqueid: \" + str(request))\n\n #self.mathop = pMath.mathOperations(operationtype, 1, params.dataTypes[datatype]['fillValue'], None)\n self.logger.info(\n \"(\" + self.name + \"):__preProcessIncomingRequest__: (MonthlyGEFSRainfallAnalysis_Type): Don't forget about this: self.mathop, it is used again in the finish job code. \")\n self.isDownloadJob = False\n self.dj_OperationName = \"NotDLoad\"\n self.derived_opname = \"MonthlyGEFSRainfallAnalysis\"\n\n # HERE IS WHAT WE ACTUALLY NEED TO RETURN...\n # Some processing of all the input params (logging things along the way)\n # # Geometry one is a little complex but the example below does work.\n # Then A bunch of stuff to setup a worklist\n # return None, worklist\n\n #worklist = []\n worklist = analysisTools.get_workList_for_headProcessor_for_MonthlyGEFSRainfallAnalysis_types(uniqueid, request)\n # if (params.DEBUG_LIVE == True):\n # self.logger.debug(\n # \"(\" + self.name + \"):__preProcessIncomingRequest__ : (MonthlyRainfallAnalysis_Type): worklist array value: \" + str(worklist))\n self.logger.info(\"(\" + self.name + \"):__preProcessIncomingRequest__ : (MonthlyRainfallAnalysis_Type): worklist length array value: \" + str(len(worklist)))\n # With these two lines here, everything just goes into the ether.\n\n #not_finished_yet = \"TODO! NOT FINISHED YET!\"\n #return not_finished_yet, None\n\n # Sets off the job task runners.\n return None, worklist\n\n except Exception as e:\n self.logger.warn(\"(\" + self.name + \"): MonthlyRainfallAnalysis_Type: Error processing Request in HeadProcessor: uniqueid: \" + str(\n uniqueid) + \" Exception Error Message: \" + str(e))\n return e, None\n else:\n self.logger.info(\"(\" + self.name + \"):__preProcessIncomingRequest__: This is NOT a 'MonthlyRainfallAnalysis' type. \")\n\n\n # try:\n # return None, worklist\n # except Exception as e:\n # self.logger.warn(\"(\" + self.name + \"):Error processing Request in HeadProcessor: uniqueid: \" + str(uniqueid) + \" Exception Error Message: \" + str(e))\n # return e, None\n # # # Something is wrong with this new code???\n\n # (for MonthlyRainfallAnalysis Types)\n # # Notes: What needs to happen here is:\n # # # Jobs need to be split up however possible (worst case is NO splitting and one thread does ALL the analysis...)\n # # # Each thread needs to update the status when it does a chunk of the work. (simillar to existing code)\n # # # When all is done, a master process collates all the data into a single return object. (I think the entry point for that is found here in this file)\n # # # The data is returned.\n\n\n try:\n if(params.DEBUG_LIVE == True):\n self.logger.info(\"(\"+self.name+\"):__preProcessIncomingRequest__: params.DEBUG_LIVE is set to True. 
            if(params.DEBUG_LIVE == True):\n                self.logger.info(\"(\"+self.name+\"):__preProcessIncomingRequest__: params.DEBUG_LIVE is set to True. There will be a lot of textual output for this run.\")\n            \n            uniqueid = request['uniqueid']\n            self.__insertProgressDb__(uniqueid)\n            self.__write_JobStarted_To_DB__(uniqueid, str(request))   # Log when the Job has started. \n            \n            #self.logger.info(\"Processing Request: \"+uniqueid)\n            self.logger.info(\"(\"+self.name+\"):__preProcessIncomingRequest__: uniqueid: \"+str(uniqueid))\n            \n            datatype = request['datatype']\n            begintime = request['begintime']\n            endtime = request['endtime']\n            intervaltype = request['intervaltype']\n            \n            # KS Refactor 2015 // Dirty override for download operations type.\n            operationtype = request['operationtype'] # Original line (just get the operation param)\n            #self.mathop = pMath.mathOperations(operationtype,1,params.dataTypes[datatype]['fillValue'],None)\n            #self.logger.info(\"(\"+self.name+\"):__preProcessIncomingRequest__: DEBUG: About to do the DIRTY OVERRIDE! operationtype value: \"+ str(operationtype))\n            if(params.parameters[operationtype][1] == 'download'):\n                # If this is a download dataset request, set the self.mathop prop to 0 (the 'max' operator). This is just so I don't have to refactor a ton of code to get this feature working at this time... note: a refactor of this IS needed!\n                self.mathop = pMath.mathOperations(0,1,params.dataTypes[datatype]['fillValue'],None)\n                \n                # Additional customized code for download jobs\n                self.isDownloadJob = True \n                self.dj_OperationName = \"download\"\n            else:\n                # This is pass-through for all normal requests.\n                self.mathop = pMath.mathOperations(operationtype,1,params.dataTypes[datatype]['fillValue'],None) \n                self.isDownloadJob = False\n                self.dj_OperationName = \"NotDLoad\"\n            \n            #self.logger.info(\"(\"+self.name+\"):__preProcessIncomingRequest__: DEBUG: MADE IT PAST THE DIRTY OVERRIDE! requestID: \"+uniqueid)\n            \n            size = params.getGridDimension(int(datatype))\n            dates = dproc.getListOfTimes(begintime, endtime,intervaltype)\n            \n            if (intervaltype == 0):\n                dates = params.dataTypes[datatype]['indexer'].cullDateList(dates)\n            \n            # KS Developer Note: The issue here is that I need to only cut simple rectangle-shaped images out of the data.\n            # All I really need is the largest bounding box that encompasses all points (regardless of how complex the original polygon was).\n            # Seems simple right? :)\n            # The other part of this issue is that this only needs to happen on download data requests. If I change the code for all requests, it becomes less efficient for stats type jobs.\n            #self.logger.info(\"(\"+self.name+\"):__preProcessIncomingRequest__: DEBUG ALERT: Right now, only user drawn polygons are supported for download requests. Need to write a function that gets geometry values from features as well.. 
VERY IMPORTANT TODO BEFORE RELEASE!!\")\n #geometry_ToPass = None\n polygon_Str_ToPass = None\n dataTypeCategory = params.dataTypes[datatype]['data_category'] # == 'ClimateModel'\n \n geotransform, wkt = rp.getSpatialReference(int(datatype))\n \n # User Drawn Polygon\n if ('geometry' in request):\n \n if(params.DEBUG_LIVE == True):\n self.logger.info(\"(\"+self.name+\"):__preProcessIncomingRequest__: DEBUG: GEOMETRY FOUND (POLYGON DRAWN BY USER)\")\n \n \n # Get the polygon string\n polygonstring = request['geometry']\n \n # Process input polygon string\n geometry = geoutils.decodeGeoJSON(polygonstring)\n #geometry = geoutils.decodeGeoJSON(polygon_Str_ToPass)\n \n if(params.DEBUG_LIVE == True):\n self.logger.debug(\"(\"+self.name+\"):__preProcessIncomingRequest__ : polygonstring (request['geometry']) value: \" + str(polygonstring))\n \n \n # Needed for download types\n #polygon_Str_ToPass = polygonstring \n \n # IMPORTANT BEFORE RELEASING ALL DATA DOWNLOADS\n # running the below if statement part breaks the mask generation... \n # Latest test shows that CHIRPS dataset actually produces a working image\n # and that seasonal forecasts do as well...\n # Lets see if there is a way to keep the mask on file downloads..\n \n #if(self.dj_OperationName == \"download\"):\n if((self.dj_OperationName == \"download\") | (dataTypeCategory == 'ClimateModel')):\n polygon_Str_ToPass = extractTif.get_ClimateDataFiltered_PolygonString_FromSingleGeometry(geometry)\n \n if(params.DEBUG_LIVE == True):\n self.logger.debug(\"(\"+self.name+\"):__preProcessIncomingRequest__ : polygon_Str_ToPass (request['geometry']) value: \" + str(polygon_Str_ToPass))\n \n geometry = geoutils.decodeGeoJSON(polygon_Str_ToPass)\n bounds, mask = mg.rasterizePolygon(geotransform, size[0], size[1], geometry)\n else:\n polygon_Str_ToPass = polygonstring \n bounds, mask = mg.rasterizePolygon(geotransform, size[0], size[1], geometry)\n \n \n # ks refactor // Getting geometry and bounds info.\n #geometry_ToPass = geometry\n \n if(params.DEBUG_LIVE == True):\n self.logger.debug(\"(\"+self.name+\"):__preProcessIncomingRequest__ : polygonstring (request['geometry']) value: \" + str(polygonstring))\n self.logger.debug(\"(\"+self.name+\"):__preProcessIncomingRequest__ : (user defined polygon) geometry value: \" + str(geometry))\n self.logger.debug(\"(\"+self.name+\"):__preProcessIncomingRequest__ : bounds value: \" + str(bounds))\n \n \n # User Selected a Feature\n elif ('layerid' in request):\n \n if(params.DEBUG_LIVE == True):\n self.logger.info(\"(\"+self.name+\"):__preProcessIncomingRequest__: DEBUG: LAYERID FOUND (FEATURE SELECTED BY USER)\")\n \n layerid = request['layerid']\n featureids = request['featureids']\n geometries = sf.getPolygons(layerid, featureids)\n \n if(params.DEBUG_LIVE == True):\n self.logger.debug(\"(\"+self.name+\"):__preProcessIncomingRequest__ : (FeatureSelection) geometries value: \" + str(geometries))\n \n \n \n \n # For Download data types, convert all of the geometries into a bounding box that covers the whole map.\n # RIGHT HERE!!\n #if(self.dj_OperationName == \"download\"):\n if((self.dj_OperationName == \"download\") | (dataTypeCategory == 'ClimateModel')):\n # Convert all the geometries to the rounded polygon string, and then pass that through the system\n polygonstring = extractTif.get_ClimateDataFiltered_PolygonString_FromMultipleGeometries(geometries)\n polygon_Str_ToPass = polygonstring\n geometry = geoutils.decodeGeoJSON(polygonstring)\n bounds, mask = mg.rasterizePolygon(geotransform, size[0], size[1], 
geometry)\n \n else:\n \n bounds,mask = mg.rasterizePolygons(geotransform, size[0], size[1], geometries)\n \n \n \n #Break up date\n #Check for cached polygon\n #if no cached polygon exists rasterize polygon\n clippedmask = mask[bounds[2]:bounds[3],bounds[0]:bounds[1]]\n #self.logger.debug(\"(\"+self.name+\"):__preProcessIncomingRequest__ : debug : Value of 'mask': \" + str(mask))\n #self.logger.debug(\"(\"+self.name+\"):__preProcessIncomingRequest__ : debug : Value of 'clippedmask': \" + str(clippedmask))\n \n\n current_mask_and_storage_uuid = uniqueid\n #self.__writeMask__(uniqueid,clippedmask,bounds)\n self.__writeMask__(current_mask_and_storage_uuid, clippedmask, bounds)\n \n del mask\n del clippedmask\n worklist =[]\n for date in dates:\n workid = uu.getUUID()\n #workdict = {'uid':uniqueid,'workid':workid,'bounds':bounds,'datatype':datatype,'operationtype':operationtype, 'intervaltype':intervaltype}\n workdict = {'uid':uniqueid, 'current_mask_and_storage_uuid':current_mask_and_storage_uuid, 'workid':workid,'bounds':bounds,'datatype':datatype,'operationtype':operationtype, 'intervaltype':intervaltype, 'polygon_Str_ToPass':polygon_Str_ToPass, 'derived_product': False} #'geometryToClip':geometry_ToPass}\n if (intervaltype == 0):\n workdict['year'] = date[2]\n workdict['month'] = date[1]\n workdict['day'] = date[0]\n dateObject = dateutils.createDateFromYearMonthDay(date[2], date[1], date[0])\n workdict['isodate'] = dateObject.strftime(params.intervals[0]['pattern'])\n workdict['epochTime'] = dateObject.strftime(\"%s\")\n worklist.extend([workdict])\n elif (intervaltype == 1):\n workdict['year'] = date[1]\n workdict['month'] = date[0]\n dateObject = dateutils.createDateFromYearMonth(date[1], date[0])\n workdict['isodate'] = dateObject.strftime(params.intervals[0]['pattern'])\n workdict['epochTime'] = dateObject.strftime(\"%s\")\n worklist.extend([workdict])\n elif (intervaltype == 2):\n workdict['year'] = date\n dateObject = dateutils.createDateFromYear(date)\n workdict['isodate'] = dateObject.strftime(params.intervals[0]['pattern'])\n workdict['epochTime'] = dateObject.strftime(\"%s\")\n worklist.extend([workdict])\n # ks Refactor // Understanding how the work is distributed among worker threads.\n # # if(params.DEBUG_LIVE == True):\n # # self.logger.debug(\"(\"+self.name+\"):__preProcessIncomingRequest__ : worklist array value: \" + str(worklist))\n # self.logger.info(\n # \"(\" + self.name + \"):__preProcessIncomingRequest__ : worklist array value: \" + str(worklist))\n self.logger.info(\"(\" + self.name + \"):__preProcessIncomingRequest__ : request['begintime']: \" + str(begintime))\n self.logger.info(\"(\" + self.name + \"):__preProcessIncomingRequest__ : request['endtime']: \" + str(endtime))\n self.logger.info(\"(\" + self.name + \"):__preProcessIncomingRequest__ : request['intervaltype']: \" + str(intervaltype))\n self.logger.info(\"(\" + self.name + \"):__preProcessIncomingRequest__ : dates: \" + str(dates))\n\n \n return None, worklist\n except Exception as e:\n self.logger.warn(\"(\"+self.name+\"):Error processing Request in HeadProcessor: uniqueid: \"+str(uniqueid)+\" Exception Error Message: \"+str(e))\n return e,None\n \n def __processProgress__(self, progress):\n self.__updateProgressDb__(self.request['uniqueid'],progress)\n \n def __outputData__(self):\n self.logger.info(\"outputting data for \"+self.request['uniqueid'])\n output = {'data':self.finished_items}\n self.__writeResults__(self.request['uniqueid'], output)\n\n def __outputDataForMonthlyAnalysis__(self):\n # This 
is the place where we KNOW the worklist is completed, and now we can compute the derived product info on it.\n        # Then we can output a specifically formatted array of objects that the client is ready to graph.\n        # TODO! Right here, form out the final output for the derived product!\n        #derived_product_output_list = [{\"testObjectKey\":\"testObjectValue_TODO_FINISH_THIS_CODE\"}]\n        derived_product_output = analysisTools.get_output_for_MonthlyRainfallAnalysis_from(self.finished_items)\n        # So in short, we want to do something like this:\n        # derived_product_output_list = AnalysisTools.get_output_for_MonthlyAnalysis(self.finished_items)\n        #\n        # Debug: testing to see what one item from 'self.finished_items' looks like\n        self.logger.info(\"Example of: self.finished_items[0]: \" + str(self.finished_items[0]) )\n\n        self.logger.info(\"outputting data for \"+self.request['uniqueid'])\n\n        # FOR TESTING (Outputs Raw Data AND MonthlyAnalysisChart data)\n        output = {'data':self.finished_items, 'MonthlyAnalysisOutput': derived_product_output}\n\n        # FOR PRODUCTION (Only outputs the stuff we need for the MonthlyAnalysisChart)\n        #output = {'MonthlyAnalysisOutput': derived_product_output}\n\n        self.__writeResults__(self.request['uniqueid'], output)\n    \n    def __processErrors__(self, errors):\n        self.logger.info(\"Errors: \" + str(errors))\n    \nif __name__ == \"__main__\":\n    name = sys.argv[1]\n    inputconn = sys.argv[2]\n    outputconn = sys.argv[3]\n    listenconn = sys.argv[4]\n    ZMQCHIRPSHeadProcessor(name, inputconn, outputconn, listenconn)", "repo_name": "SERVIR/ClimateSERV", "sub_path": "cserv/pythonCode/servirchirpsdjango/CHIRPS/processing/zmqconnected/ZMQCHIRPSHeadProcessor.py", "file_name": "ZMQCHIRPSHeadProcessor.py", "file_ext": "py", "file_size_in_byte": 54046, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "33", "api": [{"api_name": "CHIRPS.utils.locallog.locallogging.getNamedLogger", "line_number": 31, "usage_type": "call"}, {"api_name": "CHIRPS.utils.locallog.locallogging", "line_number": 31, "usage_type": "name"}, {"api_name": "zmq.Context", "line_number": 67, "usage_type": "call"}, {"api_name": "zmq.PULL", "line_number": 69, "usage_type": "attribute"}, {"api_name": "zmq.PUSH", "line_number": 71, "usage_type": "attribute"}, {"api_name": "zmq.PULL", "line_number": 73, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 81, "usage_type": "call"}, {"api_name": "time.time", "line_number": 82, "usage_type": "call"}, {"api_name": "time.time", "line_number": 85, "usage_type": "call"}, {"api_name": "CHIRPS.utils.configuration.parameters.zipFile_ScratchWorkspace_Path", "line_number": 96, "usage_type": "attribute"}, {"api_name": "CHIRPS.utils.configuration.parameters", "line_number": 96, "usage_type": "name"}, {"api_name": "CHIRPS.utils.file.ExtractTifFromH5.create_Scratch_Folder", "line_number": 97, "usage_type": "call"}, {"api_name": "CHIRPS.utils.file.ExtractTifFromH5", "line_number": 97, "usage_type": "name"}, {"api_name": "CHIRPS.utils.file.ExtractTifFromH5.zip_Extracted_Tif_Files_Controller", "line_number": 117, "usage_type": "call"}, {"api_name": "CHIRPS.utils.file.ExtractTifFromH5", "line_number": 117, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 171, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 225, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 251, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 266, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 297, 
"usage_type": "call"}, {"api_name": "json.loads", "line_number": 382, "usage_type": "call"}, {"api_name": "CHIRPS.utils.configuration.parameters.dataTypes", "line_number": 416, "usage_type": "attribute"}, {"api_name": "CHIRPS.utils.configuration.parameters", "line_number": 416, "usage_type": "name"}, {"api_name": "CHIRPS.utils.configuration.parameters.dataTypes", "line_number": 418, "usage_type": "attribute"}, {"api_name": "CHIRPS.utils.configuration.parameters", "line_number": 418, "usage_type": "name"}, {"api_name": "operator.itemgetter", "line_number": 474, "usage_type": "call"}, {"api_name": "CHIRPS.utils.configuration.parameters.getResultsFilename", "line_number": 529, "usage_type": "call"}, {"api_name": "CHIRPS.utils.configuration.parameters", "line_number": 529, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 531, "usage_type": "call"}, {"api_name": "CHIRPS.utils.db.bddbprocessing.BDDbConnector", "line_number": 536, "usage_type": "call"}, {"api_name": "CHIRPS.utils.db.bddbprocessing", "line_number": 536, "usage_type": "name"}, {"api_name": "CHIRPS.utils.db.bddbprocessing.BDDbConnector", "line_number": 541, "usage_type": "call"}, {"api_name": "CHIRPS.utils.db.bddbprocessing", "line_number": 541, "usage_type": "name"}, {"api_name": "CHIRPS.utils.RequestLog.requestLog.requestLog", "line_number": 551, "usage_type": "call"}, {"api_name": "CHIRPS.utils.RequestLog.requestLog", "line_number": 551, "usage_type": "name"}, {"api_name": "CHIRPS.utils.RequestLog.requestLog.requestLog", "line_number": 564, "usage_type": "call"}, {"api_name": "CHIRPS.utils.RequestLog.requestLog", "line_number": 564, "usage_type": "name"}, {"api_name": "CHIRPS.utils.RequestLog.requestLog.requestLog", "line_number": 577, "usage_type": "call"}, {"api_name": "CHIRPS.utils.RequestLog.requestLog", "line_number": 577, "usage_type": "name"}, {"api_name": "CHIRPS.utils.file.MaskTempStorage.writeHMaskToTempStorage", "line_number": 584, "usage_type": "call"}, {"api_name": "CHIRPS.utils.file.MaskTempStorage", "line_number": 584, "usage_type": "name"}, {"api_name": "CHIRPS.utils.processtools.AnalysisTools.get_workList_for_headProcessor_for_MonthlyRainfallAnalysis_types", "line_number": 673, "usage_type": "call"}, {"api_name": "CHIRPS.utils.processtools.AnalysisTools", "line_number": 673, "usage_type": "name"}, {"api_name": "CHIRPS.utils.processtools.AnalysisTools.get_workList_for_headProcessor_for_MonthlyGEFSRainfallAnalysis_types", "line_number": 719, "usage_type": "call"}, {"api_name": "CHIRPS.utils.processtools.AnalysisTools", "line_number": 719, "usage_type": "name"}, {"api_name": "CHIRPS.utils.configuration.parameters.DEBUG_LIVE", "line_number": 756, "usage_type": "attribute"}, {"api_name": "CHIRPS.utils.configuration.parameters", "line_number": 756, "usage_type": "name"}, {"api_name": "CHIRPS.utils.configuration.parameters.parameters", "line_number": 776, "usage_type": "attribute"}, {"api_name": "CHIRPS.utils.configuration.parameters", "line_number": 776, "usage_type": "name"}, {"api_name": "CHIRPS.utils.processtools.pMathOperations.mathOperations", "line_number": 778, "usage_type": "call"}, {"api_name": "CHIRPS.utils.processtools.pMathOperations", "line_number": 778, "usage_type": "name"}, {"api_name": "CHIRPS.utils.configuration.parameters.dataTypes", "line_number": 778, "usage_type": "attribute"}, {"api_name": "CHIRPS.utils.configuration.parameters", "line_number": 778, "usage_type": "name"}, {"api_name": "CHIRPS.utils.processtools.pMathOperations.mathOperations", "line_number": 785, "usage_type": 
"call"}, {"api_name": "CHIRPS.utils.processtools.pMathOperations", "line_number": 785, "usage_type": "name"}, {"api_name": "CHIRPS.utils.configuration.parameters.dataTypes", "line_number": 785, "usage_type": "attribute"}, {"api_name": "CHIRPS.utils.configuration.parameters", "line_number": 785, "usage_type": "name"}, {"api_name": "CHIRPS.utils.configuration.parameters.getGridDimension", "line_number": 791, "usage_type": "call"}, {"api_name": "CHIRPS.utils.configuration.parameters", "line_number": 791, "usage_type": "name"}, {"api_name": "CHIRPS.utils.processtools.dateprocessor.getListOfTimes", "line_number": 792, "usage_type": "call"}, {"api_name": "CHIRPS.utils.processtools.dateprocessor", "line_number": 792, "usage_type": "name"}, {"api_name": "CHIRPS.utils.configuration.parameters.dataTypes", "line_number": 795, "usage_type": "attribute"}, {"api_name": "CHIRPS.utils.configuration.parameters", "line_number": 795, "usage_type": "name"}, {"api_name": "CHIRPS.utils.configuration.parameters.dataTypes", "line_number": 804, "usage_type": "attribute"}, {"api_name": "CHIRPS.utils.configuration.parameters", "line_number": 804, "usage_type": "name"}, {"api_name": "CHIRPS.utils.file.npmemmapstorage.getSpatialReference", "line_number": 806, "usage_type": "call"}, {"api_name": "CHIRPS.utils.file.npmemmapstorage", "line_number": 806, "usage_type": "name"}, {"api_name": "CHIRPS.utils.configuration.parameters.DEBUG_LIVE", "line_number": 811, "usage_type": "attribute"}, {"api_name": "CHIRPS.utils.configuration.parameters", "line_number": 811, "usage_type": "name"}, {"api_name": "CHIRPS.utils.geo.geoutils.decodeGeoJSON", "line_number": 819, "usage_type": "call"}, {"api_name": "CHIRPS.utils.geo.geoutils", "line_number": 819, "usage_type": "name"}, {"api_name": "CHIRPS.utils.configuration.parameters.DEBUG_LIVE", "line_number": 822, "usage_type": "attribute"}, {"api_name": "CHIRPS.utils.configuration.parameters", "line_number": 822, "usage_type": "name"}, {"api_name": "CHIRPS.utils.file.ExtractTifFromH5.get_ClimateDataFiltered_PolygonString_FromSingleGeometry", "line_number": 837, "usage_type": "call"}, {"api_name": "CHIRPS.utils.file.ExtractTifFromH5", "line_number": 837, "usage_type": "name"}, {"api_name": "CHIRPS.utils.configuration.parameters.DEBUG_LIVE", "line_number": 839, "usage_type": "attribute"}, {"api_name": "CHIRPS.utils.configuration.parameters", "line_number": 839, "usage_type": "name"}, {"api_name": "CHIRPS.utils.geo.geoutils.decodeGeoJSON", "line_number": 842, "usage_type": "call"}, {"api_name": "CHIRPS.utils.geo.geoutils", "line_number": 842, "usage_type": "name"}, {"api_name": "CHIRPS.utils.geo.clippedmaskgenerator.rasterizePolygon", "line_number": 843, "usage_type": "call"}, {"api_name": "CHIRPS.utils.geo.clippedmaskgenerator", "line_number": 843, "usage_type": "name"}, {"api_name": "CHIRPS.utils.geo.clippedmaskgenerator.rasterizePolygon", "line_number": 846, "usage_type": "call"}, {"api_name": "CHIRPS.utils.geo.clippedmaskgenerator", "line_number": 846, "usage_type": "name"}, {"api_name": "CHIRPS.utils.configuration.parameters.DEBUG_LIVE", "line_number": 852, "usage_type": "attribute"}, {"api_name": "CHIRPS.utils.configuration.parameters", "line_number": 852, "usage_type": "name"}, {"api_name": "CHIRPS.utils.configuration.parameters.DEBUG_LIVE", "line_number": 861, "usage_type": "attribute"}, {"api_name": "CHIRPS.utils.configuration.parameters", "line_number": 861, "usage_type": "name"}, {"api_name": "CHIRPS.utils.geo.shapefile.readShapesfromFiles.getPolygons", "line_number": 866, 
"usage_type": "call"}, {"api_name": "CHIRPS.utils.geo.shapefile.readShapesfromFiles", "line_number": 866, "usage_type": "name"}, {"api_name": "CHIRPS.utils.configuration.parameters.DEBUG_LIVE", "line_number": 868, "usage_type": "attribute"}, {"api_name": "CHIRPS.utils.configuration.parameters", "line_number": 868, "usage_type": "name"}, {"api_name": "CHIRPS.utils.file.ExtractTifFromH5.get_ClimateDataFiltered_PolygonString_FromMultipleGeometries", "line_number": 879, "usage_type": "call"}, {"api_name": "CHIRPS.utils.file.ExtractTifFromH5", "line_number": 879, "usage_type": "name"}, {"api_name": "CHIRPS.utils.geo.geoutils.decodeGeoJSON", "line_number": 881, "usage_type": "call"}, {"api_name": "CHIRPS.utils.geo.geoutils", "line_number": 881, "usage_type": "name"}, {"api_name": "CHIRPS.utils.geo.clippedmaskgenerator.rasterizePolygon", "line_number": 882, "usage_type": "call"}, {"api_name": "CHIRPS.utils.geo.clippedmaskgenerator", "line_number": 882, "usage_type": "name"}, {"api_name": "CHIRPS.utils.geo.clippedmaskgenerator.rasterizePolygons", "line_number": 886, "usage_type": "call"}, {"api_name": "CHIRPS.utils.geo.clippedmaskgenerator", "line_number": 886, "usage_type": "name"}, {"api_name": "CHIRPS.utils.processtools.uutools.getUUID", "line_number": 906, "usage_type": "call"}, {"api_name": "CHIRPS.utils.processtools.uutools", "line_number": 906, "usage_type": "name"}, {"api_name": "CHIRPS.utils.file.dateutils.createDateFromYearMonthDay", "line_number": 913, "usage_type": "call"}, {"api_name": "CHIRPS.utils.file.dateutils", "line_number": 913, "usage_type": "name"}, {"api_name": "CHIRPS.utils.configuration.parameters.intervals", "line_number": 914, "usage_type": "attribute"}, {"api_name": "CHIRPS.utils.configuration.parameters", "line_number": 914, "usage_type": "name"}, {"api_name": "CHIRPS.utils.file.dateutils.createDateFromYearMonth", "line_number": 920, "usage_type": "call"}, {"api_name": "CHIRPS.utils.file.dateutils", "line_number": 920, "usage_type": "name"}, {"api_name": "CHIRPS.utils.configuration.parameters.intervals", "line_number": 921, "usage_type": "attribute"}, {"api_name": "CHIRPS.utils.configuration.parameters", "line_number": 921, "usage_type": "name"}, {"api_name": "CHIRPS.utils.file.dateutils.createDateFromYear", "line_number": 926, "usage_type": "call"}, {"api_name": "CHIRPS.utils.file.dateutils", "line_number": 926, "usage_type": "name"}, {"api_name": "CHIRPS.utils.configuration.parameters.intervals", "line_number": 927, "usage_type": "attribute"}, {"api_name": "CHIRPS.utils.configuration.parameters", "line_number": 927, "usage_type": "name"}, {"api_name": "CHIRPS.utils.processtools.AnalysisTools.get_output_for_MonthlyRainfallAnalysis_from", "line_number": 959, "usage_type": "call"}, {"api_name": "CHIRPS.utils.processtools.AnalysisTools", "line_number": 959, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 980, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 981, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 982, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 983, "usage_type": "attribute"}]} +{"seq_id": "14182700567", "text": "import io\nimport unittest\n\nimport numpy as np\n\nfrom Orange.data import ContinuousVariable, DiscreteVariable\nfrom Orange.data.io import TabDelimFormat\n\n\nclass TestTabReader(unittest.TestCase):\n\n def setUp(self):\n DiscreteVariable._clear_cache()\n\n def test_read_easy(self):\n simplefile = \"\"\"\\\n Feature 1\\tFeature 2\\tClass 1\\tClass 42\n c \\tM F \\tc 
\\td\n \\t \\tclass \\tclass\n 1.0 \\tM \\t5 \\trich\n \\tF \\t7 \\tpoor\n 2.0 \\tM \\t4 \\t\n \"\"\"\n\n file = io.StringIO(simplefile)\n table = TabDelimFormat()._read_file(file)\n\n f1, f2, c1, c2 = table.domain.variables\n self.assertIsInstance(f1, ContinuousVariable)\n self.assertEqual(f1.name, \"Feature 1\")\n self.assertIsInstance(f2, DiscreteVariable)\n self.assertEqual(f2.name, \"Feature 2\")\n self.assertIsInstance(c1, ContinuousVariable)\n self.assertEqual(c1.name, \"Class 1\")\n self.assertIsInstance(c2, DiscreteVariable)\n self.assertEqual(c2.name, \"Class 42\")\n\n np.testing.assert_almost_equal(table.X, np.array([[1, 0], [np.nan, 1], [2, 0]]))\n np.testing.assert_almost_equal(table.Y, np.array([[5, 1], [7, 0], [4, np.nan]]))\n\n def test_read_and_save_attributes(self):\n samplefile = \"\"\"\\\n Feature 1\\tFeature 2\\tClass 1\\tClass 42\n c \\tM F \\tc \\td\n \\ta=1 b=2 \\tclass x=a\\\\ longer\\\\ string \\tclass\n 1.0 \\tM \\t5 \\trich\n \"\"\"\n file = io.StringIO(samplefile)\n table = TabDelimFormat()._read_file(file)\n\n f1, f2, c1, c2 = table.domain.variables\n self.assertIsInstance(f2, DiscreteVariable)\n self.assertEqual(f2.name, \"Feature 2\")\n self.assertEqual(f2.attributes, {'a': '1', 'b': '2'})\n self.assertIn(c1, table.domain.class_vars)\n self.assertIsInstance(c1, ContinuousVariable)\n self.assertEqual(c1.name, \"Class 1\")\n self.assertEqual(c1.attributes, {'x': 'a longer string'})\n\n outf = io.StringIO()\n outf.close = lambda: None\n TabDelimFormat.write_file(outf, table)\n saved = outf.getvalue()\n\n file = io.StringIO(saved)\n table = TabDelimFormat()._read_file(file)\n\n f1, f2, c1, c2 = table.domain.variables\n self.assertIsInstance(f2, DiscreteVariable)\n self.assertEqual(f2.name, \"Feature 2\")\n self.assertEqual(f2.attributes, {'a': '1', 'b': '2'})\n self.assertIn(c1, table.domain.class_vars)\n self.assertIsInstance(c1, ContinuousVariable)\n self.assertEqual(c1.name, \"Class 1\")\n self.assertEqual(c1.attributes, {'x': 'a longer string'})\n\n def test_reuse_variables(self):\n file1 = io.StringIO(\"\\n\".join(\"xd dbac\"))\n t1 = TabDelimFormat()._read_file(file1)\n\n self.assertSequenceEqual(t1.domain['x'].values, 'abcd')\n np.testing.assert_almost_equal(t1.X.ravel(), [3, 1, 0, 2])\n\n file2 = io.StringIO(\"\\n\".join(\"xd hgacb\"))\n t2 = TabDelimFormat()._read_file(file2)\n\n self.assertSequenceEqual(t2.domain['x'].values, 'abcdgh')\n np.testing.assert_almost_equal(t2.X.ravel(), [5, 4, 0, 2, 1])\n", "repo_name": "jujuefengliu/orange3", "sub_path": "Orange/tests/test_tab_reader.py", "file_name": "test_tab_reader.py", "file_ext": "py", "file_size_in_byte": 3255, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "42", "api": [{"api_name": "unittest.TestCase", "line_number": 10, "usage_type": "attribute"}, {"api_name": "Orange.data.DiscreteVariable._clear_cache", "line_number": 13, "usage_type": "call"}, {"api_name": "Orange.data.DiscreteVariable", "line_number": 13, "usage_type": "name"}, {"api_name": "io.StringIO", "line_number": 25, "usage_type": "call"}, {"api_name": "Orange.data.io.TabDelimFormat", "line_number": 26, "usage_type": "call"}, {"api_name": "Orange.data.ContinuousVariable", "line_number": 29, "usage_type": "argument"}, {"api_name": "Orange.data.DiscreteVariable", "line_number": 31, "usage_type": "argument"}, {"api_name": "Orange.data.ContinuousVariable", "line_number": 33, "usage_type": "argument"}, {"api_name": "Orange.data.DiscreteVariable", "line_number": 35, "usage_type": "argument"}, 
{"api_name": "numpy.testing.assert_almost_equal", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.testing.assert_almost_equal", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 39, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 48, "usage_type": "call"}, {"api_name": "Orange.data.io.TabDelimFormat", "line_number": 49, "usage_type": "call"}, {"api_name": "Orange.data.DiscreteVariable", "line_number": 52, "usage_type": "argument"}, {"api_name": "Orange.data.ContinuousVariable", "line_number": 56, "usage_type": "argument"}, {"api_name": "io.StringIO", "line_number": 60, "usage_type": "call"}, {"api_name": "Orange.data.io.TabDelimFormat.write_file", "line_number": 62, "usage_type": "call"}, {"api_name": "Orange.data.io.TabDelimFormat", "line_number": 62, "usage_type": "name"}, {"api_name": "io.StringIO", "line_number": 65, "usage_type": "call"}, {"api_name": "Orange.data.io.TabDelimFormat", "line_number": 66, "usage_type": "call"}, {"api_name": "Orange.data.DiscreteVariable", "line_number": 69, "usage_type": "argument"}, {"api_name": "Orange.data.ContinuousVariable", "line_number": 73, "usage_type": "argument"}, {"api_name": "io.StringIO", "line_number": 78, "usage_type": "call"}, {"api_name": "Orange.data.io.TabDelimFormat", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.testing.assert_almost_equal", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 82, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 84, "usage_type": "call"}, {"api_name": "Orange.data.io.TabDelimFormat", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.testing.assert_almost_equal", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 88, "usage_type": "attribute"}]} +{"seq_id": "6522086164", "text": "\"\"\"This script deletes all questions from database and reads questions from a JSON file into database\"\"\"\n\nimport json\nimport os\nimport sys\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nif BASE_DIR not in sys.path:\n\tsys.path.append(BASE_DIR)\n\ndef add_attrs(obj, attr_list, data, force=True):\n\tfor attr in attr_list:\n\t\tif force or attr in data:\n\t\t\tsetattr(obj, attr, data[attr])\n\ndef add_ques_list(fname):\n\tQuestion.objects.all().delete()\n\twith open(fname) as qfile:\n\t\tdata = json.load(qfile)\n\t\tfor (i, ques) in enumerate(data):\n\t\t\tq = Question(qno=i+1)\n\t\t\tadd_attrs(q, ('score', 'corrans'), ques)\n\t\t\tadd_attrs(q, ('title',), ques, False)\n\t\t\tif \"hint\" in ques:\n\t\t\t\tadd_attrs(q, ('hint', 'hint_penalty'), ques)\n\t\t\t\tq.hint_enabled = True\n\t\t\telse:\n\t\t\t\tq.hint_enabled = False\n\t\t\tq.save()\n\nif __name__==\"__main__\":\n\t# set up django\n\tos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"project_conf.settings\")\n\tprint(\"Setting up Django ...\", flush=True, end='')\n\timport django\n\tdjango.setup()\n\tprint(\" done\")\n\nfrom main.models import Question\n\nif __name__==\"__main__\":\n\tadd_ques_list(sys.argv[1])\n", "repo_name": "bitsacm/IW16", "sub_path": "scripts/add_ques.py", 
"file_name": "add_ques.py", "file_ext": "py", "file_size_in_byte": 1113, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "42", "api": [{"api_name": "os.path.dirname", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 20, "usage_type": "call"}, {"api_name": "os.environ.setdefault", "line_number": 34, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 34, "usage_type": "attribute"}, {"api_name": "django.setup", "line_number": 37, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 43, "usage_type": "attribute"}]} +{"seq_id": "37252888260", "text": "from django import forms\nfrom groups.models import Group\n\n\ndef set_field_html_name(cls, new_name):\n \"\"\"\n This creates wrapper around the normal widget rendering,\n allowing for a custom field name (new_name).\n \"\"\"\n old_render = cls.widget.render\n def _widget_render_wrapper(name, value, attrs=None):\n return old_render(new_name, value, attrs)\n\n cls.widget.render = _widget_render_wrapper\n\nclass GroupForm(forms.ModelForm):\n\n class Meta():\n model = Group\n fields = ['name', 'description']\n\n\n widgets = {\n 'name': forms.TextInput(attrs = {'class': 'textinputclass',\n 'type':'text', 'placeholder' : 'Название новой супер идеи'}),\n 'description': forms.Textarea(attrs = {'class': 'editable \\\n medium-editor-textarea postcontent',\n 'placeholder': False}),\n }\n\n labels = {\n 'name':'',\n 'description':'Описание'\n }\n\n\n", "repo_name": "esnorta/ideabank", "sub_path": "groups/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 1013, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "42", "api": [{"api_name": "django.forms.ModelForm", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 16, "usage_type": "name"}, {"api_name": "groups.models.Group", "line_number": 19, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 24, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 24, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 26, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "19944094112", "text": "from typing import TYPE_CHECKING, List\nfrom datetime import timedelta, datetime, date\nimport logging\n\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template import Context, Template as DjangoTemplate\nfrom django.utils.html import strip_tags\nfrom django.conf import settings\n\nfrom mailing.settings import (CONFIRMATION_TYPE_MESSAGE,\n NOTIFICATION_TYPE_MESSAGE)\nfrom mailing.build_tpl import build_ttt_table\n\nif TYPE_CHECKING:\n from core.models import Order, OrderItem\n from mailing.data_classes import DeepSales\n\nlogger = logging.getLogger(\"main\")\n\n\ndef send(order: 'Order', order_items: 'List[OrderItem]',\n domain: str, type_msg: str) -> bool:\n \"\"\"\n Send mail\n\n :param order: order of user\n :param order_items: items of order\n :param domain: domain to unsubscribe\n :param type_msg: type of 
message (used to pick the template)\n    :return:\n    \"\"\"\n    from core.models import Order\n    from mailing.models import Template\n\n    from_email = settings.DEFAULT_FROM_EMAIL\n\n    template_obj = Template.objects.get(slug=type_msg)\n    template = template_obj.rus_template\n    subject = template_obj.rus_title\n\n    if order.lng == Order.ENG_LANGUAGE:\n        template = template_obj.eng_template\n        subject = template_obj.eng_title\n    tpl = DjangoTemplate(template)\n    pixel_link = \"{}/api/tr.png?token={}&status={}\".format(\n        domain,\n        order.token,\n        type_msg\n    )\n    if type_msg == NOTIFICATION_TYPE_MESSAGE:\n        order_items_id = [str(item.pk) for item in order_items]\n        pixel_link = \"{}&items={}\".format(pixel_link, \",\".join(order_items_id))\n\n    context = Context(\n        {\n            'name': order.user_name,\n            'dt_dp': order.dt_dp,\n            'dp': order.dp,\n            'd': order.d,\n            'ttt_table': build_ttt_table(order_items, order.lng),\n            'buy_link': \"\",\n            'unsubscribe_link': \"/unsubscribe?token={}&lng={}\".format(\n                order.token, order.lng),\n            'pixel': \"<img src='{}'>\".format(pixel_link)\n        }\n    )\n    html_content = tpl.render(context)\n    text_content = strip_tags(html_content)\n    msg = EmailMultiAlternatives(subject, text_content, from_email,\n                                 [order.user_email])\n    msg.attach_alternative(html_content, \"text/html\")\n    return msg.send()\n\n\ndef send_confirmation(order: 'Order', domain: str):\n    \"\"\"\n    Fill the template and send the confirmation mail\n\n    :param order: order of the user\n    :param domain: site domain used to build links\n    :return:\n    \"\"\"\n    from core.models import Order, OrderItem\n    order_items = OrderItem.objects.filter(order=order)\n\n    send(order, order_items, domain, CONFIRMATION_TYPE_MESSAGE)\n\n    order.status = Order.ACTIVE_STATUS\n    order.save()\n\n\ndef send_notification(order_items: 'List[OrderItem]',\n                      sales_depth: int, domain: str):\n    \"\"\"\n    Fill the template and send the notification mail\n\n    :param order_items: items of the order\n    :param sales_depth: number of days before departure (dt_dp) that sales open\n    :param domain: site domain used to build links\n    :return:\n    \"\"\"\n    from core.models import OrderItem\n\n    day_before = 1\n    sales_depth += day_before\n    today = date.today()\n\n    for order_item in order_items:\n        dt_start_sale = order_item.dt_dp - timedelta(days=sales_depth)\n\n        logger.debug(\"Dates: {} - {}, Tt: {}\".format(\n            today, dt_start_sale.date(), order_item.ttt_id))\n\n        if dt_start_sale.date() > today:\n            continue\n\n        logger.debug(\"Tt: {}, Send Email\".format(order_item.ttt_id))\n\n        order_item.status = order_item.SUCCESS_SEND_STATUS\n\n        if not send(order_item.order, [order_item], domain,\n                    NOTIFICATION_TYPE_MESSAGE):\n            order_item.status = order_item.FAIL_SEND_STATUS\n\n        order_item.dt_finish = datetime.now()\n        order_item.save()\n\n        # Check whether any unsent order items remain; 
if not - set done status\n rest_items = OrderItem.objects.filter(order=order_item.order).exclude(\n status=order_item.FAIL_SEND_STATUS).exclude(\n status=order_item.SUCCESS_SEND_STATUS)\n\n if not rest_items:\n order_item.order.status = order_item.order.DONE_STATUS\n order_item.order.dt_finish = datetime.now()\n order_item.order.save()\n", "repo_name": "vladz/drafts", "sub_path": "django-admin/ga/mailing/send_mail.py", "file_name": "send_mail.py", "file_ext": "py", "file_size_in_byte": 4307, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "42", "api": [{"api_name": "typing.TYPE_CHECKING", "line_number": 14, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.settings.DEFAULT_FROM_EMAIL", "line_number": 35, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 35, "usage_type": "name"}, {"api_name": "mailing.models.Template.objects.get", "line_number": 37, "usage_type": "call"}, {"api_name": "mailing.models.Template.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "mailing.models.Template", "line_number": 37, "usage_type": "name"}, {"api_name": "core.models.Order.ENG_LANGUAGE", "line_number": 41, "usage_type": "attribute"}, {"api_name": "core.models.Order", "line_number": 41, "usage_type": "name"}, {"api_name": "django.template.Template", "line_number": 44, "usage_type": "call"}, {"api_name": "mailing.settings.NOTIFICATION_TYPE_MESSAGE", "line_number": 50, "usage_type": "name"}, {"api_name": "django.template.Context", "line_number": 54, "usage_type": "call"}, {"api_name": "mailing.build_tpl.build_ttt_table", "line_number": 60, "usage_type": "call"}, {"api_name": "django.utils.html.strip_tags", "line_number": 68, "usage_type": "call"}, {"api_name": "django.core.mail.EmailMultiAlternatives", "line_number": 69, "usage_type": "call"}, {"api_name": "core.models.OrderItem.objects.filter", "line_number": 84, "usage_type": "call"}, {"api_name": "core.models.OrderItem.objects", "line_number": 84, "usage_type": "attribute"}, {"api_name": "core.models.OrderItem", "line_number": 84, "usage_type": "name"}, {"api_name": "mailing.settings.CONFIRMATION_TYPE_MESSAGE", "line_number": 86, "usage_type": "argument"}, {"api_name": "core.models.Order.ACTIVE_STATUS", "line_number": 88, "usage_type": "attribute"}, {"api_name": "core.models.Order", "line_number": 88, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 106, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 106, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 109, "usage_type": "call"}, {"api_name": "mailing.settings.NOTIFICATION_TYPE_MESSAGE", "line_number": 122, "usage_type": "argument"}, {"api_name": "datetime.datetime.now", "line_number": 125, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 125, "usage_type": "name"}, {"api_name": "core.models.OrderItem.objects.filter", "line_number": 129, "usage_type": "call"}, {"api_name": "core.models.OrderItem.objects", "line_number": 129, "usage_type": "attribute"}, {"api_name": "core.models.OrderItem", "line_number": 129, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 135, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 135, "usage_type": "name"}]} +{"seq_id": "23211828008", "text": "import math\nfrom typing import List\n\nfrom entity_types import Coords, FunctionRepr\n\n\nclass Func:\n\tdef 
__init__(self, func: FunctionRepr):\n\t\tself.numerator = func.numerator\n\t\tself.denominator = func.denominator\n\t\t\n\n\tdef get_coords_by(self, x0:int, x_last:int)->List[Coords]:\n\t\tcoords = []\n\t\tfor x in range(x0, x_last):\n\t\t\ty = self.get_y_by_x(x)\n\t\t\tcoords.append(Coords(x=x, y=y))\n\t\treturn coords\n\n\n\tdef set_funcRepr(self, new_func:FunctionRepr):\n\t\tself.numerator = new_func.numerator\n\t\tself.denominator = new_func.denominator\n\n\n\tdef get_y_by_x(self, x:int)->float:\n\t\ty_numerator = sum([kf*(x**dg) for kf, dg in self.numerator])\n\t\ty_denominator = sum([kf*(x**dg) for kf, dg in self.denominator])\n\n\t\tif len(self.denominator) == 0:\n\t\t\t# Denominator doesn't exist\n\t\t\treturn y_numerator\n\t\telif y_denominator == 0:\n\t\t\t# Denominator exists but equals 0; treat y as effectively infinite\n\t\t\treturn 10**6\n\n\t\t# Truncate y to 2 decimal places\n\t\ty = int((y_numerator/y_denominator)*100)/100\n\t\treturn y", "repo_name": "DimaM315/visual_graph_of_the_function", "sub_path": "services/mathCore/Func.py", "file_name": "Func.py", "file_ext": "py", "file_size_in_byte": 991, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "42", "api": [{"api_name": "entity_types.FunctionRepr", "line_number": 8, "usage_type": "name"}, {"api_name": "entity_types.Coords", "line_number": 17, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 13, "usage_type": "name"}, {"api_name": "entity_types.Coords", "line_number": 13, "usage_type": "name"}, {"api_name": "entity_types.FunctionRepr", "line_number": 21, "usage_type": "name"}]}
+{"seq_id": "8594986567", "text": "import os\nimport sys\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport glob\nimport re\nfrom sklearn.utils import shuffle\nimport datetime\nimport h5py\nfrom mpl_toolkits.mplot3d import Axes3D\nimport torch\nprint(torch.__version__)\nimport torch.nn.functional as F\nfrom torch.autograd import grad\nimport matplotlib as mpl\nimport numpy.random as npr\nimport scipy.integrate as sp\nfrom pyevtk.hl import gridToVTK\nimport pandas as pd \nimport numpy.linalg as la\nfrom torch.multiprocessing import Process, Pool\nfrom NumIntg import *\n# import rff\nfrom sklearn.preprocessing import StandardScaler\nfrom torch_geometric.data import InMemoryDataset, Data\nfrom sklearn.model_selection import train_test_split\nimport torch_geometric.transforms as T\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch_geometric.nn import GCNConv, ChebConv, GATConv, TransformerConv, TAGConv, ARMAConv, SGConv, MFConv, RGCNConv\nfrom scipy.spatial.distance import pdist, squareform\nimport networkx as nx\nfrom pprint import pprint\nimport pyvista as pv\ntorch.manual_seed(2022)\nmpl.rcParams['figure.dpi'] = 350\n\n\ntorch.cuda.is_available = lambda : False\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\ndevice = torch.device('cpu')\nif torch.cuda.is_available():\n    print(\"CUDA is available, running on GPU\")\n    device = torch.device('cuda')\n    device_string = 'cuda'\n    torch.set_default_tensor_type('torch.cuda.FloatTensor')\nelse:\n    device_string = 'cpu'\n    print(\"CUDA not available, running on CPU\")\n\ndef setup_domain():\n    x_dom = 0, Length, Nx\n    y_dom = 0, Width, Ny\n    z_dom = 0, Depth, Nz\n    # create points\n    lin_x = np.linspace(x_dom[0], x_dom[1], x_dom[2])\n    lin_y = np.linspace(y_dom[0], y_dom[1], y_dom[2])\n    lin_z = np.linspace(z_dom[0], z_dom[1], z_dom[2])\n    domEn = np.zeros((Nx * Ny * Nz, 3))\n
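    # Node ordering: z is the outermost loop, then x; each (z, x) pair fills a contiguous\n    # block of Ny rows with every y-value, so y varies fastest in domEn.\n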
    c = 0\n    for z in np.nditer(lin_z):\n        for x in np.nditer(lin_x):\n            tb = y_dom[2] * c\n            te = tb + y_dom[2]\n            c += 1\n            domEn[tb:te, 0] = x\n            domEn[tb:te, 1] = lin_y\n            domEn[tb:te, 2] = z\n    print('Uniform Nodes', domEn.shape)\n    # np.meshgrid(lin_x, lin_y, lin_z)\n\n    dom = torch.from_numpy(domEn).float()\n    t1 = time.time()\n    G = create_graph(dom)\n    print( 'Building graph took ' + str(time.time()-t1) + ' s' )\n\n    # ------------------------------------ BOUNDARY ----------------------------------------\n    # Left boundary condition (Dirichlet BC)\n    bcl_u_pts_idx = np.where(dom[:, 0] == 0)\n    # Right boundary condition (Neumann BC)\n    bcr_t_pts_idx = np.where(dom[:, 0] == Length)\n    top_idx = np.where((dom[:, 1]==Width) & (dom[:, 0]>0) & (dom[:, 0]<Length))\n    # NOTE: the next few lines and the body of create_graph were corrupted in the source;\n    # the bottom/front/back index names and the graph construction are reconstructed to\n    # mirror top_idx and the surviving fragments, and are assumptions.\n    bottom_idx = np.where((dom[:, 1]==0) & (dom[:, 0]>0) & (dom[:, 0]<Length))\n    front_idx = np.where((dom[:, 2]==Depth) & (dom[:, 0]>0) & (dom[:, 0]<Length))\n    back_idx = np.where((dom[:, 2]==0) & (dom[:, 0]>0) & (dom[:, 0]<Length))\n\n    return dom, G, bcl_u_pts_idx, bcr_t_pts_idx, top_idx, bottom_idx, front_idx, back_idx\n\ndef create_graph(dom):\n    # Connect node pairs, storing each node's coordinates, then prune edges longer than\n    # one grid spacing dx.\n    npdom = dom.detach().cpu().numpy()\n    dist = squareform(pdist(npdom))\n    G = nx.Graph()\n    for i in range(len(npdom)):\n        G.add_node(i, coordinates=npdom[i])\n    for i in range(len(npdom)):\n        for j in range(i+1, len(npdom)):\n            G.add_edge(i, j)\n            if(dist[i,j]>dx):\n                # print(\"i=\",i, \"j=\",j)\n                G.remove_edge(i,j)\n    # analyze_graph(G)\n    # pprint(vars(G))\n\n    return G\n\n\ndef analyze_graph(G):\n    nx.draw(G)\n    print(\"nodes=\", G.number_of_nodes(), \"edges=\", G.number_of_edges())\n    plt.show()\n    \n# custom dataset\nclass MeshDataSet(InMemoryDataset):\n    def __init__(self, transform=None):\n        super(MeshDataSet, self).__init__('.', transform, None, None)\n\n        data = Data(edge_index=edge_index)\n        \n        data.num_nodes = G.number_of_nodes()\n        \n        # Use the node coordinates as the embedding\n        embeddings = np.zeros((G.number_of_nodes(),3))\n        for i in range(G.number_of_nodes()):\n            embeddings[i,:] = G.nodes[i]['coordinates']\n        data.nodes = torch.from_numpy(embeddings).float()\n        # normalize the coordinate values\n        scale = StandardScaler()\n        embeddings = scale.fit_transform(embeddings.reshape(-1,3))\n        \n        # embedding \n        data.x = torch.from_numpy(embeddings).float()\n        \n\n        data.num_classes = 3\n\n\n        n_nodes = G.number_of_nodes()\n        \n        # create train and test masks for data\n        X_train = pd.Series(list(G.nodes()))\n        train_mask = torch.zeros(n_nodes, dtype=torch.bool)\n        train_mask[X_train.index] = True\n        data['train_mask'] = train_mask\n\n        self.data, self.slices = self.collate([data])\n\n    def _download(self):\n        return\n\n    def _process(self):\n        return\n\n    def __repr__(self):\n        return '{}()'.format(self.__class__.__name__)\n    \n\n# GCN model \nclass GCNet(torch.nn.Module):\n    def __init__(self, D_in, H, D_out , act_fn ):\n        super(GCNet, self).__init__()\n        self.act_fn = act_fn\n        \n        # self.conv1 = GCNConv(D_in, H)\n        # self.conv2 = GCNConv(H, 2*H)\n        # self.conv3 = GCNConv(2*H, 4*H)\n        # self.conv4 = GCNConv(4*H, 2*H)\n        # self.conv5 = GCNConv(2*H, H)\n        # self.conv6 = GCNConv(H, D_out)\n        \n        kk=1\n        self.conv1 = ChebConv(D_in, H, K=kk)\n        self.conv2 = ChebConv(H, 2*H, K=kk)\n        self.conv3 = ChebConv(2*H, 4*H, K=kk)\n        self.conv4 = ChebConv(4*H, 2*H, K=kk)\n        self.conv5 = ChebConv(2*H, H, K=kk)\n        # self.conv6 = ChebConv(H, D_out, K=kk)\n        \n        # self.conv1 = GATConv(D_in, H)\n        # self.conv2 = GATConv(H, 2*H)\n        # self.conv3 = GATConv(2*H, 4*H)\n        # self.conv4 = GATConv(4*H, 2*H)\n        # self.conv5 = GATConv(2*H, H)\n        # self.conv6 = GATConv(H, D_out)\n        \n        # self.conv1 = TransformerConv(D_in, H)\n        # self.conv2 = TransformerConv(H, 2*H)\n        # self.conv3 = TransformerConv(2*H, 4*H)\n        # self.conv4 = TransformerConv(4*H, 2*H)\n        # self.conv5 = TransformerConv(2*H, H)\n        # self.conv6 = TransformerConv(H, D_out)\n        \n        # self.conv1 = TAGConv(D_in, H)\n        # self.conv2 = TAGConv(H, 2*H)\n        # self.conv3 = TAGConv(2*H, 4*H)\n        # self.conv4 = TAGConv(4*H, 2*H)\n        # self.conv5 = TAGConv(2*H, H)\n        # self.conv6 = TAGConv(H, D_out)\n        \n        # self.conv1 = ARMAConv(D_in, H)\n        # self.conv2 = ARMAConv(H, 2*H)\n        # self.conv3 = ARMAConv(2*H, 4*H)\n        # self.conv4 = ARMAConv(4*H, 2*H)\n        # 
self.conv5 = ARMAConv(2*H, H)\n # self.conv6 = ARMAConv(H, D_out)\n \n # self.conv1 = SGConv(D_in, H)\n # self.conv2 = SGConv(H, 2*H)\n # self.conv3 = SGConv(2*H, 4*H)\n # self.conv4 = SGConv(4*H, 2*H)\n # self.conv5 = SGConv(2*H, H)\n # self.conv6 = SGConv(H, D_out)\n \n # self.conv1 = MFConv(D_in, H)\n # self.conv2 = MFConv(H, 2*H)\n # self.conv3 = MFConv(2*H, 4*H)\n # self.conv4 = MFConv(4*H, 2*H)\n # self.conv5 = MFConv(2*H, H)\n # self.conv6 = MFConv(H, D_out)\n \n self.linear1 = torch.nn.Linear(H,D_out)\n \n def forward(self, x_coord, edge_index ):\n af_mapping = { 'tanh' : torch.tanh ,\n 'relu' : torch.nn.ReLU() ,\n 'rrelu' : torch.nn.RReLU() ,\n 'sigmoid' : torch.sigmoid }\n activation_fn = af_mapping[ self.act_fn ] \n \n y = self.conv1(x_coord, edge_index)\n y = activation_fn(y)\n y = self.conv2(y, edge_index)\n y = activation_fn(y)\n y = self.conv3(y, edge_index)\n y = activation_fn(y)\n y = self.conv4(y, edge_index)\n y = activation_fn(y)\n y = self.conv5(y, edge_index)\n y = activation_fn(y)\n\n # Output\n y = self.linear1(y)\n return y\n \ndef loss_sum(tinput):\n return torch.sum(tinput) / tinput.data.nelement()\n \ndef innerproduct(A,B):\n Z = (A[:,0,0] * B[:,0,0] + A[:,0,1] * B[:,0,1] + A[:,0,2] * B[:,0,2] +\n A[:,1,0] * B[:,1,0] + A[:,1,1] * B[:,1,1] + A[:,1,2] * B[:,1,2] +\n A[:,2,0] * B[:,2,0] + A[:,2,1] * B[:,2,1] + A[:,2,2] * B[:,2,2])\n \n return Z\n \ndef determinant(F):\n\n detF = (F[:,0,0] * (F[:,1,1] * F[:,2,2] - F[:,1,2] * F[:,2,1])) - (\n F[:,0,1] * (F[:,1,0] * F[:,2,2] - F[:,1,2] * F[:,2,0])) + (\n F[:,0,2] * (F[:,1,0] * F[:,2,1] - F[:,1,1] * F[:,2,0]))\n \n return detF\n \ndef inverse(F):\n \n detF = determinant(F)\n F_inv = torch.empty((len(F),3,3)) \n F_inv[:,0,0] = (F[:,1,1] * F[:,2,2] - F[:,1,2] * F[:,2,1]) / detF\n F_inv[:,0,1] = -(F[:,0,1] * F[:,2,2] - F[:,0,2] * F[:,2,1]) / detF\n F_inv[:,0,2] = (F[:,0,1] * F[:,1,2] - F[:,0,2] * F[:,1,1]) / detF\n F_inv[:,1,0] = -(F[:,1,0] * F[:,2,2] - F[:,1,2] * F[:,2,0]) / detF\n F_inv[:,1,1] = (F[:,0,0] * F[:,2,2] - F[:,0,2] * F[:,2,0]) / detF\n F_inv[:,1,2] = -(F[:,0,0] * F[:,1,2] - F[:,0,2] * F[:,1,0]) / detF\n F_inv[:,2,0] = (F[:,1,0] * F[:,2,1] - F[:,1,1] * F[:,2,0]) / detF\n F_inv[:,2,1] = -(F[:,0,0] * F[:,2,1] - F[:,0,1] * F[:,2,0]) / detF\n F_inv[:,2,2] = (F[:,0,0] * F[:,1,1] - F[:,0,1] * F[:,1,0]) / detF\n \n return F_inv\n\ndef trace(A):\n\n trace_A = A[:,0,0] + A[:,1,1] + A[:,2,2]\n\n return trace_A\n\ndef displacement_gradient(u,x):\n\n gradu = torch.empty((len(x),3,3))\n \n duxdxyz = grad(u[:, 0].unsqueeze(1), x, torch.ones(x.size()[0], 1, device=device), create_graph=True, retain_graph=True)[0]\n duydxyz = grad(u[:, 1].unsqueeze(1), x, torch.ones(x.size()[0], 1, device=device), create_graph=True, retain_graph=True)[0]\n duzdxyz = grad(u[:, 2].unsqueeze(1), x, torch.ones(x.size()[0], 1, device=device), create_graph=True, retain_graph=True)[0]\n \n du11 = duxdxyz[:, 0].unsqueeze(1); du12 = duxdxyz[:, 1].unsqueeze(1); du13 = duxdxyz[:, 2].unsqueeze(1)\n du21 = duydxyz[:, 0].unsqueeze(1); du22 = duydxyz[:, 1].unsqueeze(1); du23 = duydxyz[:, 2].unsqueeze(1)\n du31 = duzdxyz[:, 0].unsqueeze(1); du32 = duzdxyz[:, 1].unsqueeze(1); du33 = duzdxyz[:, 2].unsqueeze(1)\n \n gradu[:,0,0] = du11.squeeze(1); gradu[:,0,1] = du12.squeeze(1); gradu[:,0,2] = du13.squeeze(1)\n gradu[:,1,0] = du21.squeeze(1); gradu[:,1,1] = du22.squeeze(1); gradu[:,1,2] = du23.squeeze(1)\n gradu[:,2,0] = du31.squeeze(1); gradu[:,2,1] = du32.squeeze(1); gradu[:,2,2] = du33.squeeze(1)\n\n # For diagonal case\n # gradu[:,0,1]=0; gradu[:,0,2]=0; 
gradu[:,1,0]=0; gradu[:,1,2]=0; gradu[:,2,0]=0; gradu[:,2,1]=0\n\n return gradu\n\n\ndef deformation_gradient(u,x):\n\n identity = torch.zeros((len(x), 3, 3)); identity[:,0,0]=1; identity[:,1,1]=1; identity[:,2,2]=1\n gradu = displacement_gradient(u, x)\n F = identity + gradu\n\n return F\n \ndef stressNH(F):\n # Material Properties\n lmbdaNH = YM * PR /(1+PR)/(1-2*PR)\n muNH = YM/2/(1+PR)\n Finv = inverse(F)\n detF = determinant(F)\n stressPK = muNH * F + (lmbdaNH * torch.log(detF) - muNH).view(-1,1,1)*Finv.permute(0,2,1)\n return stressPK\n\ndef stressLE( e ):\n lame1 = YM * PR / ( ( 1. + PR ) * ( 1. - 2. * PR ) )\n mu = YM / ( 2. * ( 1. + PR ) ) \n\n identity = torch.zeros((len(e), 3, 3)); identity[:,0,0]=1; identity[:,1,1]=1; identity[:,2,2]=1\n\n trace_e = e[:,0,0] + e[:,1,1] + e[:,2,2]\n return lame1 * torch.einsum( 'ijk,i->ijk' , identity , trace_e ) + 2 * mu * e\n\ndef psi(u_pred, x, integrationIE, dx, dy, dz, shape):\n mu = YM / ( 2. * ( 1. + PR ) )\n K = YM / ( 3. * ( 1. - 2. * PR ) )\n C10 = mu / 2.\n D1 = 2. / K\n # print( C10 , D1 )\n\n F = deformation_gradient(u_pred, x)\n detF = determinant(F)\n F_bar = torch.einsum( 'i,ijk->ijk' , torch.pow( detF , -1./3. ) , F )\n B_bar = torch.bmm( F_bar , F_bar.permute(0,2,1) )\n I1 = trace( B_bar )\n\n psiE = C10 * ( I1 - 3. ) + torch.pow( detF - 1 , 2. ) / D1\n\n internal_1 = integrationIE(psiE, dx=dx, dy=dy, dz=dz, shape=[shape[0], shape[1], shape[2]])\n \n return internal_1\n\ndef psi_Gauss(u, x, integrationIE, dx, dy, dz, shape):\n mu = YM / ( 2. * ( 1. + PR ) )\n K = YM / ( 3. * ( 1. - 2. * PR ) )\n C10 = mu / 2.\n D1 = 2. / K\n\n N_element = ( shape[0] - 1 ) * ( shape[1] - 1 ) * ( shape[2] - 1 )\n order = [ 1 , shape[-1] , shape[0] , shape[1] ]\n Ux = torch.transpose(u[:, 0].reshape( order ), 2, 3)\n Uy = torch.transpose(u[:, 1].reshape( order ), 2, 3)\n Uz = torch.transpose(u[:, 2].reshape( order ), 2, 3)\n U = torch.cat( (Ux,Uy,Uz) , dim=0 )\n\n # dim z y x\n U_N1 = U[ : , :-1 , :-1 , :-1 ]\n U_N2 = U[ : , :-1 , :-1 , 1: ]\n U_N3 = U[ : , 1: , :-1 , 1: ]\n U_N4 = U[ : , 1: , :-1 , :-1 ]\n U_N5 = U[ : , :-1 , 1: , :-1 ]\n U_N6 = U[ : , :-1 , 1: , 1: ]\n U_N7 = U[ : , 1: , 1: , 1: ]\n U_N8 = U[ : , 1: , 1: , :-1 ]\n U_N = torch.stack( [ U_N1 , U_N2 , U_N3 , U_N4 , U_N5 , U_N6 , U_N7 , U_N8 ] )#.double()\n\n # Compute constants\n detJ = dx*dy*dz / 8.\n Jinv = torch.zeros([3,3]).double()\n dxdydz = [ dx , dy , dz ]\n for i in range(3):\n Jinv[i,i] = 2. / dxdydz[i]\n identity = torch.zeros((N_element, 3, 3)); identity[:,0,0]=1; identity[:,1,1]=1; identity[:,2,2]=1\n\n\n # Go through all integration pts\n strainEnergy_at_elem = torch.zeros( N_element )\n\n vv = np.sqrt( 1. / 3. 
)\n pt = [-vv,vv]\n intpt = torch.tensor([[pt[0],pt[0],pt[0]],\n [pt[1],pt[0],pt[0]],\n [pt[1],pt[1],pt[0]],\n [pt[0],pt[1],pt[0]],\n [pt[0],pt[0],pt[1]],\n [pt[1],pt[0],pt[1]],\n [pt[1],pt[1],pt[1]],\n [pt[0],pt[1],pt[1]]])\n\n for i in range( 8 ):\n x_ , y_ , z_ = intpt[i,:]\n # Shape grad in natural coords\n B = torch.tensor([[-((y_ - 1)*(z_ - 1))/8, -((x_ - 1)*(z_ - 1))/8, -((x_ - 1)*(y_ - 1))/8],\n [ ((y_ - 1)*(z_ - 1))/8, ((x_ + 1)*(z_ - 1))/8, ((x_ + 1)*(y_ - 1))/8],\n [-((y_ - 1)*(z_ + 1))/8, -((x_ + 1)*(z_ + 1))/8, -((x_ + 1)*(y_ - 1))/8],\n [ ((y_ - 1)*(z_ + 1))/8, ((x_ - 1)*(z_ + 1))/8, ((x_ - 1)*(y_ - 1))/8],\n [ ((y_ + 1)*(z_ - 1))/8, ((x_ - 1)*(z_ - 1))/8, ((x_ - 1)*(y_ + 1))/8],\n [-((y_ + 1)*(z_ - 1))/8, -((x_ + 1)*(z_ - 1))/8, -((x_ + 1)*(y_ + 1))/8],\n [ ((y_ + 1)*(z_ + 1))/8, ((x_ + 1)*(z_ + 1))/8, ((x_ + 1)*(y_ + 1))/8],\n [-((y_ + 1)*(z_ + 1))/8, -((x_ - 1)*(z_ + 1))/8, -((x_ - 1)*(y_ + 1))/8]]).double()\n \n # Convert to physical gradient\n B_physical = torch.matmul( B , Jinv ).double()\n dUx = torch.einsum( 'ijkl,iq->qjkl' , U_N[:,0,:,:,:] , B_physical )\n dUy = torch.einsum( 'ijkl,iq->qjkl' , U_N[:,1,:,:,:] , B_physical )\n dUz = torch.einsum( 'ijkl,iq->qjkl' , U_N[:,2,:,:,:] , B_physical )\n grad_u = torch.reshape( torch.transpose( torch.flatten( torch.cat( (dUx,dUy,dUz) , dim=0 ) , start_dim=1, end_dim=-1 ) , 0 , 1 ) , [N_element,3,3] )\n\n # Def grad\n F = grad_u + identity\n\n detF = determinant( F )\n F_bar = torch.einsum( 'i,ijk->ijk' , torch.pow( detF , -1./3. ) , F )\n B_bar = torch.bmm( F_bar , F_bar.permute(0,2,1) )\n I1 = trace( B_bar )\n\n psiE = C10 * ( I1 - 3. ) + torch.pow( detF - 1 , 2. ) / D1\n\n strainEnergy_at_elem += psiE * 1. * detJ \n return torch.sum( strainEnergy_at_elem )\n\ndef LE(u_pred, x, integrationIE, dx, dy, dz, shape):\n grad_u = displacement_gradient(u_pred, x)\n strain = 0.5 * ( grad_u + grad_u.permute(0,2,1) )\n\n stress = stressLE( strain )\n\n psiE = 0.5 * torch.einsum( 'ijk,ijk->i' , stress , strain )\n\n internal_1 = integrationIE(psiE, dx=dx, dy=dy, dz=dz, shape=[shape[0], shape[1], shape[2]])\n \n return internal_1\n\ndef LE_Gauss(u, x, integrationIE, dx, dy, dz, shape):\n N_element = ( shape[0] - 1 ) * ( shape[1] - 1 ) * ( shape[2] - 1 )\n order = [ 1 , shape[-1] , shape[0] , shape[1] ]\n Ux = torch.transpose(u[:, 0].reshape( order ), 2, 3)\n Uy = torch.transpose(u[:, 1].reshape( order ), 2, 3)\n Uz = torch.transpose(u[:, 2].reshape( order ), 2, 3)\n U = torch.cat( (Ux,Uy,Uz) , dim=0 )\n\n # dim z y x\n U_N1 = U[ : , :-1 , :-1 , :-1 ]\n U_N2 = U[ : , :-1 , :-1 , 1: ]\n U_N3 = U[ : , 1: , :-1 , 1: ]\n U_N4 = U[ : , 1: , :-1 , :-1 ]\n U_N5 = U[ : , :-1 , 1: , :-1 ]\n U_N6 = U[ : , :-1 , 1: , 1: ]\n U_N7 = U[ : , 1: , 1: , 1: ]\n U_N8 = U[ : , 1: , 1: , :-1 ]\n U_N = torch.stack( [ U_N1 , U_N2 , U_N3 , U_N4 , U_N5 , U_N6 , U_N7 , U_N8 ] )#.double()\n\n # Compute constants\n detJ = dx*dy*dz / 8.\n Jinv = torch.zeros([3,3]).double()\n dxdydz = [ dx , dy , dz ]\n for i in range(3):\n Jinv[i,i] = 2. / dxdydz[i]\n\n grad2strain = torch.zeros([6,9]).double()\n grad2strain[0,0] = 1. # 11\n grad2strain[1,4] = 1. # 22\n grad2strain[2,8] = 1. # 33\n grad2strain[3,5] = 0.5; grad2strain[3,7] = 0.5 # 23\n grad2strain[4,2] = 0.5; grad2strain[4,6] = 0.5 # 13\n grad2strain[5,1] = 0.5; grad2strain[5,3] = 0.5 # 12 \n\n C_elastic = torch.zeros([6,6]).double()\n C_elastic[0,0] = 1. - PR; C_elastic[0,1] = PR; C_elastic[0,2] = PR\n C_elastic[1,0] = PR; C_elastic[1,1] = 1. 
- PR; C_elastic[1,2] = PR\n C_elastic[2,0] = PR; C_elastic[2,1] = PR; C_elastic[2,2] = 1. - PR\n C_elastic[3,3] = 1. - 2. * PR;\n C_elastic[4,4] = 1. - 2. * PR;\n C_elastic[5,5] = 1. - 2. * PR;\n C_elastic *= ( YM / ( ( 1. + PR ) * ( 1. - 2. * PR ) ) )\n\n # Go through all integration pts\n strainEnergy_at_elem = torch.zeros( [ shape[-1] -1 , shape[1] -1 , shape[0] -1 ] )\n\n vv = np.sqrt( 1. / 3. )\n pt = [-vv,vv]\n intpt = torch.tensor([[pt[0],pt[0],pt[0]],\n [pt[1],pt[0],pt[0]],\n [pt[1],pt[1],pt[0]],\n [pt[0],pt[1],pt[0]],\n [pt[0],pt[0],pt[1]],\n [pt[1],pt[0],pt[1]],\n [pt[1],pt[1],pt[1]],\n [pt[0],pt[1],pt[1]]])\n\n for i in range( 8 ):\n x_ , y_ , z_ = intpt[i,:]\n # Shape grad in natural coords\n B = torch.tensor([[-((y_ - 1)*(z_ - 1))/8, -((x_ - 1)*(z_ - 1))/8, -((x_ - 1)*(y_ - 1))/8],\n [ ((y_ - 1)*(z_ - 1))/8, ((x_ + 1)*(z_ - 1))/8, ((x_ + 1)*(y_ - 1))/8],\n [-((y_ - 1)*(z_ + 1))/8, -((x_ + 1)*(z_ + 1))/8, -((x_ + 1)*(y_ - 1))/8],\n [ ((y_ - 1)*(z_ + 1))/8, ((x_ - 1)*(z_ + 1))/8, ((x_ - 1)*(y_ - 1))/8],\n [ ((y_ + 1)*(z_ - 1))/8, ((x_ - 1)*(z_ - 1))/8, ((x_ - 1)*(y_ + 1))/8],\n [-((y_ + 1)*(z_ - 1))/8, -((x_ + 1)*(z_ - 1))/8, -((x_ + 1)*(y_ + 1))/8],\n [ ((y_ + 1)*(z_ + 1))/8, ((x_ + 1)*(z_ + 1))/8, ((x_ + 1)*(y_ + 1))/8],\n [-((y_ + 1)*(z_ + 1))/8, -((x_ - 1)*(z_ + 1))/8, -((x_ - 1)*(y_ + 1))/8]]).double()\n \n # Convert to physical gradient\n B_physical = torch.matmul( B , Jinv ).double()\n dUx = torch.einsum( 'ijkl,iq->qjkl' , U_N[:,0,:,:,:] , B_physical )\n dUy = torch.einsum( 'ijkl,iq->qjkl' , U_N[:,1,:,:,:] , B_physical )\n dUz = torch.einsum( 'ijkl,iq->qjkl' , U_N[:,2,:,:,:] , B_physical )\n dU = torch.cat( (dUx,dUy,dUz) , dim=0 )\n\n # Strain [ 11 , 22 , 33 , 23 , 13 , 12 ]\n eps = torch.einsum( 'qi,ijkl->qjkl' , grad2strain , dU )\n\n # Stress [ 11 , 22 , 33 , 23 , 13 , 12 ]\n Cauchy = torch.einsum( 'qi,ijkl->qjkl' , C_elastic , eps )\n\n # Shear stresses need to be counted twice due to symmetry\n Cauchy[3:,:,:,:] *= 2.\n SE = 0.5 * torch.einsum( 'ijkl,ijkl->jkl' , Cauchy , eps ) \n\n # Scaled by design density\n strainEnergy_at_elem += SE * 1. 
* detJ \n return torch.sum( strainEnergy_at_elem )\n\ndef CauchyStress(P, F):\n\n detF = determinant(F)\n sigma = torch.pow(detF,-1).view(-1,1,1) * torch.bmm(P,F.permute(0,2,1)) \n return sigma\n\ndef strain(F):\n\n identity = torch.zeros((len(F), 3, 3)); identity[:,0,0]=1; identity[:,1,1]=1; identity[:,2,2]=1\n C = torch.bmm(F.permute(0,2,1),F)\n strainCG = 0.5 * (C-identity)\n \n return strainCG\n\ndef ConvergenceCheck( arry , rel_tol ):\n num_check = 10\n\n if HyperOPT and arry[-1] < -4.:\n print('Solution diverged!!!!!!!')\n return True\n\n\n # Run minimum of 2*num_check iterations\n if len( arry ) < 2 * num_check :\n return False\n\n mean1 = np.mean( arry[ -2*num_check : -num_check ] )\n mean2 = np.mean( arry[ -num_check : ] )\n\n if np.abs( mean2 ) < 1e-6:\n print('Loss value converged to abs tol of 1e-6' )\n return True \n\n if ( np.abs( mean1 - mean2 ) / np.abs( mean2 ) ) < rel_tol:\n print('Loss value converged to rel tol of ' + str(rel_tol) )\n return True\n else:\n return False\n\n\nclass DeepMixedMethod:\n # Instance attributes\n def __init__(self, model):\n self.GCNet = GCNet( model[0], model[1], model[2] , model[4] )\n self.GCNet = self.GCNet.to(device)\n numIntType = 'AD'# 'AD' 'trapezoidal'\n self.intLoss = IntegrationLoss(numIntType, 3)\n self.lr = model[3]\n\n def train_model(self): \n integrationIE = self.intLoss.lossInternalEnergy\n integrationEE = self.intLoss.lossExternalEnergy\n torch.set_printoptions(precision=8) \n x_scaled, edge_index = nodes.x, nodes.edge_index\n x_coord = nodes.nodes\n x_coord.requires_grad_(True); x_coord.retain_grad()\n\n optimizerL = torch.optim.LBFGS(self.GCNet.parameters(), lr=self.lr, max_iter=200, line_search_fn='strong_wolfe', tolerance_change=1e-6, tolerance_grad=1e-6)\n \n LOSS = {}\n disp_history = np.zeros((step_max+1,nodes.num_nodes, 3))\n strainCG_histroy = np.zeros((step_max+1,nodes.num_nodes, 3, 3))\n stressC_histroy = np.zeros((step_max+1,nodes.num_nodes, 3, 3))\n electric_potential_M2 = torch.zeros( nodes.num_nodes )\n E_field = torch.zeros((nodes.num_nodes, 3))\n\n for step in range(1,step_max+1):\n self.applied_trac = step/step_max * total_traction\n tempL = []\n for epoch in range(epochs):\n def closure():\n loss = self.loss_function(step, epoch, x_scaled, edge_index, x_coord, X1_idx,self.applied_trac, integrationIE, integrationEE)\n optimizerL.zero_grad()\n loss.backward(retain_graph=True)\n tempL.append(loss.item())\n return loss\n optimizerL.step(closure)\n\n # Check convergence\n if ConvergenceCheck( tempL , rel_tol ):\n break\n\n LOSS[step] = tempL\n \n \n u_pred = self.getU(x_coord, edge_index) \n disp_history[step,:, :] = u_pred[:,:3].detach().cpu().numpy()\n\n if Example == 'Hyperelastic': \n F_M2 = deformation_gradient(u_pred, x_coord)\n strainCG_M2 = strain(F_M2)\n stressPK_M2 = stressNH(F_M2)\n stressC_M2 = CauchyStress(stressPK_M2, F_M2)\n\n elif Example == 'Elastic':\n grad_u = displacement_gradient(u_pred, x_coord)\n strainCG_M2 = 0.5 * ( grad_u + grad_u.permute(0,2,1) )\n stressC_M2 = stressLE( strainCG_M2 )\n\n stressC_histroy[step,:, :, :] = stressC_M2.detach().cpu().numpy()\n strainCG_histroy[step,:, :, :] = strainCG_M2.detach().cpu().numpy()\n \n return disp_history, strainCG_histroy, stressC_histroy, stressC_M2, strainCG_M2 , electric_potential_M2 , E_field , LOSS\n\n def getU(self, x_coord, edge_index):\n u = self.GCNet.forward(x_coord, edge_index).double()\n\n Ux = x_coord[:, 0] * u[:, 0]\n Uy = x_coord[:, 0] * u[:, 1]\n Uz = x_coord[:, 0] * u[:, 2]\n Ux = Ux.reshape(Ux.shape[0], 1)\n Uy = 
Uy.reshape(Uy.shape[0], 1)\n Uz = Uz.reshape(Uz.shape[0], 1)\n u_pred = torch.cat((Ux, Uy, Uz), -1)\n return u_pred\n\n def loss_function(self, step, epoch, x_scaled, edge_index, x_coord, X1_idx,traction, integrationIE, integrationEE):\n U = self.getU(x_coord, edge_index)\n\n if Example == 'Hyperelastic':\n if INT_TYPE == 'AD':\n internal = psi(U, x_coord, integrationIE, dx, dy, dz, shape)\n else:\n internal = psi_Gauss(U, x_coord, integrationIE, dx, dy, dz, shape)\n elif Example == 'Elastic':\n if INT_TYPE == 'AD':\n internal = LE(U, x_coord, integrationIE, dx, dy, dz, shape)\n else:\n internal = LE_Gauss(U, x_coord, integrationIE, dx, dy, dz, shape)\n\n\n neu_uP_pred = U[X1_idx,:3]\n neu_u_pred = neu_uP_pred[:,1]\n fext = traction * neu_u_pred \n\n external = integrationEE(fext, dx=dy, dy=dz, shape=[shape[1], shape[2]])\n L_E = internal - external\n loss = L_E\n print('Step = '+ str(step) + ', Epoch = ' + str( epoch) + ', L = ' + str( loss.item() ) )\n return loss\n \n\n\nglobal Example\nExample = 'Hyperelastic'\n# Example = 'Elastic'\n\nINT_TYPE = 'AD'\n# INT_TYPE = 'SF'\n\n\nprint('Example = ' + Example + ', using ' + INT_TYPE )\nbase = './' + Example + '_' + INT_TYPE + '/'\nif not os.path.exists( base ):\n os.mkdir( base )\n\n\n# ----------------------------- define structural parameters ----------------------------------------\nLength = 4.0\nWidth = 1.0\nDepth = 1.0\nnumb_nodes_cont_param = 10\nNy = numb_nodes_cont_param\nNz = numb_nodes_cont_param\nNx = int((numb_nodes_cont_param-1) * int(Length/Width) + 1)\n\n# Nx = 44; Ny = 13; Nz = 13\n\nshape = [Nx, Ny, Nz]\nprint( shape )\n\nx_min, y_min, z_min = (0.0, 0.0, 0.0)\n(dx, dy, dz) = (Length / (Nx - 1), Width / (Ny - 1), Depth / (Nz - 1))\n\n# --------------------- Graph and Data ---------------------------\nG, Boundaries = setup_domain()\nprint('# of nodes is ', G.number_of_nodes())\nprint('# of X1 surface nodes is ', len(Boundaries['X1']))\nprint('# of X2 surface nodes is ', len(Boundaries['X2']))\nprint('# of Y1 surface nodes is ', len(Boundaries['Y1']))\nprint('# of Y2 surface nodes is ', len(Boundaries['Y2']))\nprint('# of Z1 surface nodes is ', len(Boundaries['Z1']))\nprint('# of Z2 surface nodes is ', len(Boundaries['Z2']))\n\n# create edge index\nadj = nx.to_scipy_sparse_array(G).tocoo()\nrow = torch.from_numpy(adj.row.astype(np.int64)).to(torch.long)\ncol = torch.from_numpy(adj.col.astype(np.int64)).to(torch.long)\nedge_index = torch.stack([row,col], dim=0)\n\ndataset = MeshDataSet()\ndata = dataset[0]\nnodes = data.to(device)\nX1_idx = Boundaries['X1'].to(device)\n# ----------------------------- End ----------------------------\n# --------------------------------------------------------------\n# --------------------------------------------------------------\n\n# ----------------------- network settings -----------------\nD_in = 3\nH = 16\nD_out = 3\n\n# Material Parameters\nYM = 1000\nPR = 0.3\n\n# Loading\ntotal_traction = -25.\nstep_max = 50\nref_file = './AbaqusReferenceDisplacements/' + 'NH_Disp25_'\n\n# Training\nepochs = 20\nrel_tol = 5e-5\n\n\n# Initial hyper parameters\nx_var = { 'x_lr' : 0.01 ,\n 'neuron' : 16 ,\n 'act_func' : 'tanh' }\n\ndef Obj( x_var ):\n lr = x_var['x_lr']\n H = int(x_var['neuron'])\n act_fn = x_var['act_func']\n print( 'LR: ' + str(lr) + ', H: ' + str(H) + ', act fn: ' + act_fn )\n\n gem = DeepMixedMethod([D_in, H, D_out,lr,act_fn])\n start_time = time.time()\n disp_history, strainCG_histroy, stressC_history, stressC_last, strain_last , electric_potential_last , E_field , LOSS = 
gem.train_model()\n end_time = time.time()\n print('simulation time = ' + str(end_time - start_time) + 's')\n\n #######################################################################################################################################\n # Save data\n x_space = np.expand_dims(nodes.nodes[:,0].detach().cpu().numpy(), axis=1)\n y_space = np.expand_dims(nodes.nodes[:,1].detach().cpu().numpy(), axis=1)\n z_space = np.expand_dims(nodes.nodes[:,2].detach().cpu().numpy(), axis=1)\n coordin = np.concatenate((x_space, y_space, z_space), axis=1)\n U = disp_history[-1,:,:]\n\n Nodal_Strain = torch.cat((strain_last[:,0,0].unsqueeze(1),strain_last[:,1,1].unsqueeze(1),strain_last[:,2,2].unsqueeze(1),\\\n strain_last[:,0,1].unsqueeze(1),strain_last[:,1,2].unsqueeze(1),strain_last[:,0,2].unsqueeze(1)),axis=1)\n Nodal_Stress = torch.cat((stressC_last[:,0,0].unsqueeze(1),stressC_last[:,1,1].unsqueeze(1),stressC_last[:,2,2].unsqueeze(1),\\\n stressC_last[:,0,1].unsqueeze(1),stressC_last[:,1,2].unsqueeze(1),stressC_last[:,0,2].unsqueeze(1)),axis=1)\n Nodal_E = torch.cat((E_field[:,0].unsqueeze(1),E_field[:,1].unsqueeze(1),E_field[:,2].unsqueeze(1)),axis=1)\n\n stress_vMis = torch.pow(0.5 * (torch.pow((Nodal_Stress[:,0]-Nodal_Stress[:,1]), 2) + torch.pow((Nodal_Stress[:,1]-Nodal_Stress[:,2]), 2)\n + torch.pow((Nodal_Stress[:,2]-Nodal_Stress[:,0]), 2) + 6 * (torch.pow(Nodal_Stress[:,3], 2) +\n torch.pow(Nodal_Stress[:,4], 2) + torch.pow(Nodal_Stress[:,5], 2))), 0.5)\n Nodal_Strain = Nodal_Strain.cpu().detach().numpy()\n Nodal_Stress = Nodal_Stress.cpu().detach().numpy()\n Nodal_E = Nodal_E.cpu().detach().numpy()\n stress_vMis = stress_vMis.unsqueeze(1).cpu().detach().numpy()\n electric_potential = electric_potential_last.unsqueeze(1).cpu().detach().numpy()\n\n\n Data = np.concatenate((coordin, U, Nodal_Strain , Nodal_Stress, stress_vMis, electric_potential , Nodal_E ), axis=1)\n np.save( base + 'Results.npy',Data)\n\n LBFGS_loss_D1 = np.array(LOSS[1])\n fn = base + 'Training_loss.npy'\n np.save( fn , LBFGS_loss_D1 )\n\n\n\n #######################################################################################################################################\n # Write vtk\n def FormatMe( v ):\n S = [Nz,Nx,Ny]\n return np.swapaxes( np.swapaxes( v.reshape(S) , 0 , 1 ) , 1 , 2 ).flatten('F')\n\n grid = pv.UniformGrid()\n grid.dimensions = np.array([Nx,Ny,Nz])\n grid.origin = np.zeros(3)\n grid.spacing = np.array([dx,dy,dz])\n names = [ 'Ux' , 'Uy' , 'Uz' , 'E11' , 'E22' , 'E33' , 'E12' , 'E23' , 'E13' , 'S11' , 'S22' , 'S33' , 'S12' , 'S23' , 'S13' , 'SvM' , 'E_pot' , 'D1' , 'D2' , 'D3' ]\n for idx , n in enumerate( names ):\n grid.point_data[ n ] = FormatMe( Data[:,idx+3] )\n\n #############################################################################################\n # Abaqus comparison\n Out = np.load( ref_file + '.npy' )\n\n names = [ 'Ux_ABQ' , 'Uy_ABQ' , 'Uz_ABQ' ]\n for idx , n in enumerate( names ):\n grid.point_data[ n ] = Out[idx].flatten('F')\n\n # Compute difference\n names = [ 'Ux' , 'Uy' , 'Uz' ]\n diff = []\n for idx , n in enumerate( names ):\n FEM = grid.point_data[ n + '_ABQ' ]\n ML = grid.point_data[ n ]\n grid.point_data[ n + '_diff' ] = np.abs( FEM - ML ) / np.max( np.abs(FEM) ) * 100.\n diff.append( np.mean(grid.point_data[ n + '_diff' ]) )\n grid.save( base + \"Results.vti\")\n\n mE = np.mean(diff)\n print( 'Mean error in U compared to Abaqus: ' + str(mE) )\n return np.mean(diff)\nObj( x_var )", "repo_name": "Jasiuk-Research-Group/Graph_DEM", "sub_path": "GCN-DEM.py", 
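The stress_vMis expression in Obj above is the standard von Mises invariant written out component-wise. A quick standalone check (a NumPy sketch with hypothetical names, not part of the repo) is that a uniaxial state returns |s11| and pure shear returns sqrt(3)*tau:

import numpy as np

def von_mises(s):
    # s = [s11, s22, s33, s12, s23, s13], same component order as Nodal_Stress above
    s11, s22, s33, s12, s23, s13 = s
    return np.sqrt(0.5 * ((s11 - s22)**2 + (s22 - s33)**2 + (s33 - s11)**2)
                   + 3.0 * (s12**2 + s23**2 + s13**2))

assert np.isclose(von_mises([100., 0., 0., 0., 0., 0.]), 100.)             # uniaxial -> |s11|
assert np.isclose(von_mises([0., 0., 0., 50., 0., 0.]), np.sqrt(3.) * 50.) # pure shear -> sqrt(3)*tau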
"file_name": "GCN-DEM.py", "file_ext": "py", "file_size_in_byte": 31942, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "33", "api": [{"api_name": "torch.__version__", "line_number": 13, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.rcParams", "line_number": 37, "usage_type": "attribute"}, {"api_name": "torch.cuda", "line_number": 40, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 41, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 45, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.set_default_tensor_type", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.nditer", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.nditer", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 75, "usage_type": "call"}, {"api_name": "time.time", "line_number": 76, "usage_type": "call"}, {"api_name": "time.time", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 101, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.squareform", "line_number": 102, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.pdist", "line_number": 102, "usage_type": "call"}, {"api_name": "networkx.from_numpy_matrix", "line_number": 104, "usage_type": "call"}, {"api_name": "networkx.draw", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "torch_geometric.data.InMemoryDataset", "line_number": 124, "usage_type": "name"}, {"api_name": "torch_geometric.data.Data", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 133, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 136, "usage_type": "call"}, {"api_name": 
"sklearn.preprocessing.StandardScaler", "line_number": 138, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 142, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 152, "usage_type": "call"}, {"api_name": "torch.bool", "line_number": 152, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 169, "usage_type": "attribute"}, {"api_name": "torch_geometric.nn.ChebConv", "line_number": 182, "usage_type": "call"}, {"api_name": "torch_geometric.nn.ChebConv", "line_number": 183, "usage_type": "call"}, {"api_name": "torch_geometric.nn.ChebConv", "line_number": 184, "usage_type": "call"}, {"api_name": "torch_geometric.nn.ChebConv", "line_number": 185, "usage_type": "call"}, {"api_name": "torch_geometric.nn.ChebConv", "line_number": 186, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 231, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 231, "usage_type": "attribute"}, {"api_name": "torch.tanh", "line_number": 234, "usage_type": "attribute"}, {"api_name": "torch.nn.ReLU", "line_number": 235, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 235, "usage_type": "attribute"}, {"api_name": "torch.nn.RReLU", "line_number": 236, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 236, "usage_type": "attribute"}, {"api_name": "torch.sigmoid", "line_number": 237, "usage_type": "attribute"}, {"api_name": "torch.sum", "line_number": 256, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 267, "usage_type": "name"}, {"api_name": "torch.nn.functional", "line_number": 268, "usage_type": "name"}, {"api_name": "torch.nn.functional", "line_number": 269, "usage_type": "name"}, {"api_name": "torch.nn.functional", "line_number": 275, "usage_type": "argument"}, {"api_name": "torch.empty", "line_number": 276, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 276, "usage_type": "argument"}, {"api_name": "torch.nn.functional", "line_number": 277, "usage_type": "name"}, {"api_name": "torch.nn.functional", "line_number": 278, "usage_type": "name"}, {"api_name": "torch.nn.functional", "line_number": 279, "usage_type": "name"}, {"api_name": "torch.nn.functional", "line_number": 280, "usage_type": "name"}, {"api_name": "torch.nn.functional", "line_number": 281, "usage_type": "name"}, {"api_name": "torch.nn.functional", "line_number": 282, "usage_type": "name"}, {"api_name": "torch.nn.functional", "line_number": 283, "usage_type": "name"}, {"api_name": "torch.nn.functional", "line_number": 284, "usage_type": "name"}, {"api_name": "torch.nn.functional", "line_number": 285, "usage_type": "name"}, {"api_name": "torch.empty", "line_number": 297, "usage_type": "call"}, {"api_name": "torch.autograd.grad", "line_number": 299, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 299, "usage_type": "call"}, {"api_name": "torch.autograd.grad", "line_number": 300, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 300, "usage_type": "call"}, {"api_name": "torch.autograd.grad", "line_number": 301, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 301, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 319, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 321, "usage_type": "name"}, {"api_name": "torch.nn.functional", "line_number": 323, "usage_type": "name"}, {"api_name": "torch.nn.functional", "line_number": 329, "usage_type": 
"argument"}, {"api_name": "torch.nn.functional", "line_number": 330, "usage_type": "argument"}, {"api_name": "torch.nn.functional", "line_number": 331, "usage_type": "name"}, {"api_name": "torch.log", "line_number": 331, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 338, "usage_type": "call"}, {"api_name": "torch.einsum", "line_number": 341, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 350, "usage_type": "name"}, {"api_name": "torch.nn.functional", "line_number": 351, "usage_type": "argument"}, {"api_name": "torch.einsum", "line_number": 352, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 352, "usage_type": "argument"}, {"api_name": "torch.pow", "line_number": 352, "usage_type": "call"}, {"api_name": "torch.bmm", "line_number": 353, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 356, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 370, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 371, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 372, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 373, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 384, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 388, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 392, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 396, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 398, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 400, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 412, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 422, "usage_type": "call"}, {"api_name": "torch.einsum", "line_number": 423, "usage_type": "call"}, {"api_name": "torch.einsum", "line_number": 424, "usage_type": "call"}, {"api_name": "torch.einsum", "line_number": 425, "usage_type": "call"}, {"api_name": "torch.reshape", "line_number": 426, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 426, "usage_type": "call"}, {"api_name": "torch.flatten", "line_number": 426, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 426, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 429, "usage_type": "name"}, {"api_name": "torch.nn.functional", "line_number": 431, "usage_type": "argument"}, {"api_name": "torch.einsum", "line_number": 432, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 432, "usage_type": "argument"}, {"api_name": "torch.pow", "line_number": 432, "usage_type": "call"}, {"api_name": "torch.bmm", "line_number": 433, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 436, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 439, "usage_type": "call"}, {"api_name": "torch.einsum", "line_number": 447, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 456, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 457, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 458, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 459, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 470, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 474, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 479, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 487, "usage_type": "call"}, {"api_name": "torch.zeros", 
"line_number": 497, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 499, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 501, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 513, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 523, "usage_type": "call"}, {"api_name": "torch.einsum", "line_number": 524, "usage_type": "call"}, {"api_name": "torch.einsum", "line_number": 525, "usage_type": "call"}, {"api_name": "torch.einsum", "line_number": 526, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 527, "usage_type": "call"}, {"api_name": "torch.einsum", "line_number": 530, "usage_type": "call"}, {"api_name": "torch.einsum", "line_number": 533, "usage_type": "call"}, {"api_name": "torch.einsum", "line_number": 537, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 541, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 545, "usage_type": "argument"}, {"api_name": "torch.pow", "line_number": 546, "usage_type": "call"}, {"api_name": "torch.bmm", "line_number": 546, "usage_type": "call"}, {"api_name": "torch.nn.functional.permute", "line_number": 546, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 546, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 551, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 551, "usage_type": "argument"}, {"api_name": "torch.bmm", "line_number": 552, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 552, "usage_type": "argument"}, {"api_name": "torch.nn.functional.permute", "line_number": 552, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 569, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 570, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 572, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 576, "usage_type": "call"}, {"api_name": "torch.set_printoptions", "line_number": 595, "usage_type": "call"}, {"api_name": "torch.optim.LBFGS", "line_number": 600, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 600, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 603, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 604, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 605, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 606, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 607, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 656, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 696, "usage_type": "call"}, {"api_name": "os.path", "line_number": 696, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 697, "usage_type": "call"}, {"api_name": "networkx.to_scipy_sparse_array", "line_number": 728, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 729, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 729, "usage_type": "attribute"}, {"api_name": "torch.long", "line_number": 729, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 730, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 730, "usage_type": "attribute"}, {"api_name": "torch.long", "line_number": 730, "usage_type": "attribute"}, {"api_name": "torch.stack", "line_number": 731, "usage_type": "call"}, {"api_name": "time.time", "line_number": 772, "usage_type": "call"}, {"api_name": "time.time", "line_number": 
774, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 779, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 780, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 781, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 782, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 785, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 787, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 789, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 791, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 792, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 793, "usage_type": "call"}, {"api_name": "torch_geometric.data.Data", "line_number": 801, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 801, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 802, "usage_type": "call"}, {"api_name": "torch_geometric.data.Data", "line_number": 802, "usage_type": "argument"}, {"api_name": "numpy.array", "line_number": 804, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 806, "usage_type": "call"}, {"api_name": "numpy.swapaxes", "line_number": 814, "usage_type": "call"}, {"api_name": "pyvista.UniformGrid", "line_number": 816, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 817, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 818, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 819, "usage_type": "call"}, {"api_name": "torch_geometric.data.Data", "line_number": 822, "usage_type": "name"}, {"api_name": "numpy.load", "line_number": 826, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 838, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 838, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 839, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 842, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 844, "usage_type": "call"}]} +{"seq_id": "9462780794", "text": "'''\n\nPython implementation of a recurrent neural network for generation of music\nsequences using LSTM cells in Tensorflow.\n\nDate: 28.02.2017\n\nNotes:\nSince we use parts of the non-stable tensorflow.contrib library future\nfunctionality can not be guaranteed.\n\n'''\n\nimport sklearn.preprocessing as prep\nimport tensorflow as tf\nimport tensorflow.contrib.layers as layers\nimport numpy as np\nimport scipy.io.wavfile as wavfile\nimport matplotlib.pyplot as plt\n\n\n# ------------------------------------------------------------------------------\n# PARAMETERS\n# ------------------------------------------------------------------------------\n\n\n# - GENERAL\nTRAINING_PHASE = True # specialize if training or testing phase\ninput_numbers = 60719 # how many samples we have in our dataset\nframe_rate = 22050 # we are using 22,05kHz samples\n\n# - PARAMETERS FOR TRAINING\nnumber_of_epochs = 3 # number of training epochs\nnumber_of_mini_batches = 150 # how many mini batches we want to feed in one epoch\nmini_batch_size = 40 # how many sound samples we want to feed in one mini batch\ntime_steps = 50 # in how many timesteps we want to split one sound sample\neval_time_steps = 2 # how many timesteps of the input we want to use for backpropagation\nstore_directory = \"wav-snippets\"# directory in which to look for files\nweights_directory = \"./weights/\" # directory in which to store session weights\n\n# - PARAMETERS 
FOR NETWORK DEFINITION\nlearning_rate = 0.001 # initial learning rate for the backpropagation\nlearning_adapt_rate = 5 # scale for how fast the learning rate will adapt\nsegment_length = 10 # samples are 10 seconds long\nn_in_time_steps = time_steps-1 # save for backpropagation\nn_units = 2048 # number of LSTM units\nn_input = int(frame_rate*segment_length/time_steps)*2 # real+imag datapoints in 1/5th of a second for 22.05kHz samples (int, so it is valid in a placeholder shape)\nn_output = int(frame_rate*segment_length/time_steps)*2 # size of the linear layer output vector\n\n# - FEEDS FOR THE NETWORK\nx = tf.placeholder(tf.float32, # network data input\n                   (None, n_in_time_steps, n_input))\ny = tf.placeholder(tf.float32, # training/backprop input\n                   (None, eval_time_steps, n_output))\n\n\n
# ------------------------------------------------------------------------------\n# FUNCTIONS\n# ------------------------------------------------------------------------------\n\n\ndef fourierTransform(input_signal):\n    '''\n    creates the fourier transform from pcm input and normalizes it using\n    the sklearn preprocessing library\n    ---\n    @params\n    input_signal: array containing the sound data to be processed\n    @returns\n    fourier: array containing the real parts of the fourier transform\n        in its first half, imag parts in its second half\n    '''\n    # use numpy's fourier transform library to calculate fourier transform\n    fourier_signal = np.fft.fft(input_signal)\n    # put real part into the first half, imag part in the second half\n    fourier = np.concatenate((fourier_signal.real, fourier_signal.imag))\n    # decrease to values that float32 can handle and cast to float32\n    fourier = (fourier/np.amax(fourier)).astype('float32')\n    # normalize using sklearn\n    fourier = prep.normalize([fourier])\n    # reshape to fit network demands\n    fourier = fourier.reshape((1,-1)).reshape(-1)\n    return fourier\n\n
def invFourierTransform(output_signal):\n    '''\n    creates the inverse fourier transform from float32 input array\n    ---\n    @params\n    output_signal: array containing the sound data to be processed\n        first half contains real parts, second half contains imag parts\n    @returns\n    output_signal.real: array containing the real parts of the audio signal\n    '''\n    output_signal = output_signal / np.amax(np.absolute(output_signal))\n    (myreals, myimags) = np.split(output_signal,2)\n    output_signal = np.vectorize(complex)(myreals, myimags)\n    output_signal = np.real(np.fft.ifft(output_signal))\n    output_signal = output_signal / np.amax(np.absolute(output_signal))\n    output_signal = (output_signal*127)+128\n    output_signal = output_signal.astype('uint8')\n    return output_signal.real\n\n
def generateBatch(mini_batch_size, is_used_vector):\n    '''\n    generates a random input and validation batch from a defined file\n    directory to be fed into the network\n    ---\n    @params\n    mini_batch_size: the size of the mini batch we want to generate\n    is_used_vector: array of size \"input_numbers\" containing bools depicting\n        whether the sound sample at index has already been used for training\n        True: has not been used; False: has been used already\n    @returns\n    batch_x: python array containing input values for the network\n    batch_y: python array containing values for backpropagation\n    is_used_vector: altered state of the is_used_vector\n    '''\n    batch_x, batch_y = [], []\n    # For each element to be used in this batch\n    for i in range(mini_batch_size):\n        while True:\n            # we want a random sound sample from the dataset\n            random_index = np.random.randint(0,input_numbers)\n            # iff it has not been used before.\n            if is_used_vector[random_index]:\n                one_sound_sample = getSoundSample(\n                    store_directory, \"musicdata_{}.wav\".format(random_index))\n                # Need to apply fourier transform timestep-wise\n                for i, pcm in enumerate(one_sound_sample):\n                    one_sound_sample[i] = fourierTransform(pcm)\n                # From that sample we want all but the last timestep as network input\n                sample_part_for_network = one_sound_sample[:-1]\n                # And the last $eval_time_steps will be used for backpropagation\n                sample_part_for_backprop = one_sound_sample[-eval_time_steps:]\n                batch_x.append(sample_part_for_network)\n                batch_y.append(sample_part_for_backprop)\n                # Note that this sample has been used now\n                is_used_vector[random_index] = False\n                break\n    return batch_x, batch_y, is_used_vector\n\n
def getSoundSample(store_directory, sample_name):\n    '''\n    reads a sound sample into python using scipy and segments it into\n    segments of desired length\n    ---\n    @params\n    store_directory: str, the directory in which to look for the file to import\n    sample_name: str, the name of the file to import\n    @returns\n    one_sound_sample: the segmented sound sample as numpy array\n    '''\n    one_sound_sample = wavfile.read(\n        \"./{}/{}\".format(store_directory, sample_name))[1]\n    one_sound_sample = one_sound_sample.astype(\"float32\")\n    one_sound_sample = (one_sound_sample/128)-0.5\n    one_sound_sample = np.array_split(one_sound_sample, time_steps)\n    return one_sound_sample\n\n\n
# ------------------------------------------------------------------------------\n# DATA FLOW GRAPH\n# ------------------------------------------------------------------------------\n\n\n# Define our LSTM cell from tensorflow's contrib library with variable number of units\ncell = tf.contrib.rnn.BasicLSTMCell(n_units)\n\n# Define the zero state of the cell\ninitial_state = cell.zero_state(mini_batch_size, tf.float32)\n\n# Launch dynamic RNN Network with specified cell and initial state\n# We use time_major=False because we would need to transpose the input on our own otherwise\nrnn_outputs, _rnn_states = tf.nn.dynamic_rnn(cell, x,\n    initial_state=initial_state, time_major=False)\n\n# Get the last $eval_time_steps timestep(s) for training\nrnn_outputs_on_last_t_step = tf.slice(\n    rnn_outputs,\n    [0, n_in_time_steps - eval_time_steps, 0],\n    [mini_batch_size, eval_time_steps, n_units])\n\n# Project output from rnn output size to n_output\nfinal_projection = lambda z: layers.linear(z, num_outputs=n_output,\n    activation_fn=tf.nn.sigmoid)\n\n# Apply projection to every time step\npredicted = tf.map_fn(final_projection, rnn_outputs_on_last_t_step)\n\n# Error and backprop\nerror = tf.nn.l2_loss(tf.subtract(tf.abs(y),tf.abs(predicted)))\ntrain_step = tf.train.AdamOptimizer(learning_rate).minimize(error)\n\n# Prediction error and accuracy\naccuracy = tf.reduce_mean(tf.subtract(tf.abs(y),tf.abs(predicted)))\n\n\n
#-------------------------------------------------------------------------------\n# RUN THE NETWORK\n#-------------------------------------------------------------------------------\n\n\nif TRAINING_PHASE:\n\n    with tf.Session() as session:\n        session.run(tf.global_variables_initializer())\n        saver = tf.train.Saver()\n\n        # Iterate through all epochs\n        for epoch in range(number_of_epochs):\n\n            # Reset the is_used_vector to state that no sample has been used yet\n            is_used_vector = np.ones(input_numbers+1).astype(bool)\n\n            # Array variables we use for testing the network\n            error_function = np.zeros(number_of_mini_batches)\n            accuracy_function = np.zeros(number_of_mini_batches)\n            learning_rate_array = np.zeros(number_of_mini_batches)\n\n            # Training loop\n            for mini_batch_number in range(number_of_mini_batches):\n                # Generate an input and backprop batch and update the is_used_vector\n                batch_x, batch_y, is_used_vector = generateBatch(mini_batch_size, is_used_vector)\n\n                training_accuracy, prediction_error, _ = session.run(\n                    [accuracy,\n                    error,\n                    train_step],\n                    feed_dict = {x: batch_x, y: batch_y})\n\n                # Adapt learning rate (note: the AdamOptimizer above was built with the\n                # initial value, so the adapted value is only recorded here; feeding it\n                # back into the graph would require a learning-rate placeholder)\n                if mini_batch_number > 10:\n                    learning_rate = np.mean(accuracy_function[-3:])/learning_adapt_rate\n                    if ((epoch > 0) | (mini_batch_number > 10)):\n                        if (np.absolute(np.absolute(learning_rate_array[mini_batch_number]) - np.absolute(learning_rate_array[mini_batch_number-1])) > (np.absolute(learning_rate_array[mini_batch_number-1]) / 10)):\n                            learning_rate = (((2*learning_rate_array[mini_batch_number-1])+learning_rate_array[mini_batch_number])/3)\n                    if learning_rate < 0.000001:\n                        learning_rate = 0.000001\n\n                # Feed into arrays for plotting\n                learning_rate_array[mini_batch_number] = learning_rate\n                error_function[mini_batch_number] = prediction_error\n                accuracy_function[mini_batch_number] = training_accuracy\n\n                print(\"Training accuracy and prediction error in batch {}: {}, {}\".format(\n                    mini_batch_number, training_accuracy, prediction_error))\n\n            # Save the weights at the end of each epoch\n            saver.save(session, 'LSTM-weights', global_step=epoch)\n\n            # Plots for network optimization at end of each epoch\n            plt.subplot(311)\n            plt.xlabel(\"Batch number\")\n            plt.ylabel(\"L2 error\")\n            plt.plot(error_function)\n            plt.subplot(312)\n            plt.xlabel(\"Batch number\")\n            plt.ylabel(\"Accuracy: mean difference between data points\")\n            plt.plot(accuracy_function)\n            plt.subplot(313)\n            plt.xlabel(\"Batch number\")\n            plt.ylabel(\"Learning rate\")\n            plt.plot(learning_rate_array)\n            plt.show()\n\n
else: # if not training phase but generation\n\n    with tf.Session() as session:\n        session.run(tf.global_variables_initializer())\n        saver = tf.train.Saver()\n        saver.restore(session, \"{}LSTM-weights-0\".format(weights_directory))\n\n        desired_length = 20 # seconds\n        sample_name = \"musicdata_300.wav\"\n        generation_length = int(desired_length * (time_steps/10))\n\n        # The network consumes fourier-transformed segments, so transform the seed first\n        seed_sequence = getSoundSample(store_directory, sample_name)\n        for i, pcm in enumerate(seed_sequence):\n            seed_sequence[i] = fourierTransform(pcm)\n        working_sequence = seed_sequence[:n_in_time_steps]\n        generated_sequence = list(seed_sequence)\n\n        for steps in range(generation_length):\n            # dynamic_rnn was built with a fixed batch size, so tile the window to match\n            sound_sample = np.tile(np.array(working_sequence), (mini_batch_size, 1, 1))\n            network_prediction = session.run(\n                predicted,\n                feed_dict = {x: sound_sample})\n            # keep the newest predicted timestep of the first batch element\n            new_step = network_prediction[0][-1]\n            working_sequence = working_sequence[1:]\n            working_sequence.append(new_step)\n            generated_sequence.append(new_step)\n\n        # convert back to pcm segment-wise before writing to disk\n        generated_sequence = [invFourierTransform(segment) for segment in generated_sequence]\n        generated_sequence = np.concatenate(generated_sequence)\n        wavfile.write(\"MyTestSequence.wav\", 22050, generated_sequence)\n", "repo_name": "verrannt/TF_LSTMs_MusicGeneration", "sub_path": "LSTM_music_gen.py", "file_name": "LSTM_music_gen.py", "file_ext": "py", "file_size_in_byte": 12901, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "42", "api": [{"api_name": "tensorflow.placeholder", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 51, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 53, "usage_type": "attribute"}, {"api_name": "numpy.fft.fft", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 74, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", 
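The fourierTransform/invFourierTransform pair defined earlier in this script packs a complex spectrum as [real | imag], but each function also divides by the running maximum, so amplitudes only survive up to scale; a minimal sketch of the lossless part of the round trip (standalone, not from the script):

import numpy as np

signal = np.sin(np.linspace(0., 8. * np.pi, 64)).astype('float32')
spectrum = np.fft.fft(signal)
packed = np.concatenate((spectrum.real, spectrum.imag))  # first half real, second half imag
re, im = np.split(packed, 2)
recovered = np.real(np.fft.ifft(re + 1j * im))
assert np.allclose(recovered, signal, atol=1e-4)         # exact up to float32 rounding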
"line_number": 76, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 78, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.normalize", "line_number": 80, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 80, "usage_type": "name"}, {"api_name": "numpy.amax", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.split", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.vectorize", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.real", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.fft.ifft", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 98, "usage_type": "attribute"}, {"api_name": "numpy.amax", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 124, "usage_type": "attribute"}, {"api_name": "scipy.io.wavfile.read", "line_number": 154, "usage_type": "call"}, {"api_name": "scipy.io.wavfile", "line_number": 154, "usage_type": "name"}, {"api_name": "numpy.array_split", "line_number": 158, "usage_type": "call"}, {"api_name": "tensorflow.contrib.rnn.BasicLSTMCell", "line_number": 168, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 168, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 171, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.dynamic_rnn", "line_number": 175, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 175, "usage_type": "attribute"}, {"api_name": "tensorflow.slice", "line_number": 179, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.linear", "line_number": 185, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers", "line_number": 185, "usage_type": "name"}, {"api_name": "tensorflow.nn", "line_number": 186, "usage_type": "attribute"}, {"api_name": "tensorflow.map_fn", "line_number": 189, "usage_type": "call"}, {"api_name": "tensorflow.nn.l2_loss", "line_number": 192, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 192, "usage_type": "attribute"}, {"api_name": "tensorflow.subtract", "line_number": 192, "usage_type": "call"}, {"api_name": "tensorflow.abs", "line_number": 192, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 193, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 193, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 196, "usage_type": "call"}, {"api_name": "tensorflow.subtract", "line_number": 196, "usage_type": "call"}, {"api_name": "tensorflow.abs", "line_number": 196, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 206, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 207, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 208, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 208, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 218, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 234, "usage_type": 
"call"}, {"api_name": "numpy.absolute", "line_number": 236, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 253, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 253, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 254, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 254, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 255, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 255, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 256, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 256, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 257, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 257, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 258, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 258, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 259, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 259, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 260, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 260, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 261, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 261, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 262, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 262, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 263, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 263, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 264, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 264, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 265, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 265, "usage_type": "name"}, {"api_name": "tensorflow.Session", "line_number": 269, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 270, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 271, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 271, "usage_type": "attribute"}, {"api_name": "numpy.concat", "line_number": 292, "usage_type": "call"}, {"api_name": "scipy.io.wavfile.write", "line_number": 293, "usage_type": "call"}, {"api_name": "scipy.io.wavfile", "line_number": 293, "usage_type": "name"}]} +{"seq_id": "6485685173", "text": "import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"entro_py_min\",\n version=\"0.0.1\",\n author=\"Conrad Großer\",\n author_email=\"grosserconrad@gmail.com\",\n description=\"Small Information Entropy Calculator\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/creyD/entro.py\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n)\n", "repo_name": "creyD/entro.py", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 627, "program_lang": "python", "lang": 
"en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "42", "api": [{"api_name": "setuptools.setup", "line_number": 6, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "35640935205", "text": "from rest_framework.serializers import Serializer\nfrom productivity.models import Jobs\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated, IsAdminUser\nfrom productivity.serializers import JobsSerializers\nfrom rest_framework import status\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom Dashboards.signals import object_viewed_signal, detail_object_viewed_signal\nfrom rest_framework.pagination import PageNumberPagination\n\n\n@api_view(['GET'])\ndef JobsListMainList(request):\n query = request.query_params.get('keyword')\n if query ==None:\n query = \"\"\n\n jobs = Jobs.objects.filter(title__icontains=query).order_by('-id')\n\n count = jobs.count()\n\n #Pagination\n resPerPage = 8\n\n paginator = PageNumberPagination()\n paginator.page_size = resPerPage\n\n queryset = paginator.paginate_queryset(jobs, request)\n\n serializer = JobsSerializers(queryset, many=True)\n # object_viewed_signal.send(local.__class__, instance=\"LocalNews\", request=request)\n return Response({\"jobs\":serializer.data, \"count\":count, \"resPerPage\":resPerPage})\n\n\n@api_view(['GET'])\ndef JobsList(request):\n query = request.query_params.get('keyword')\n if query ==None:\n query = \"\"\n\n\n jobs = Jobs.objects.filter(title__icontains=query, isApproved=True).order_by('-flag', '-createdAt')\n\n page = request.query_params.get('page')\n paginator = Paginator(jobs, 6)\n try:\n jobs = paginator.page(page)\n except EmptyPage:\n jobs = paginator.page(paginator.num_pages)\n except PageNotAnInteger:\n jobs = paginator.page(1)\n \n\n if page == None:\n page = 1\n page = int(page)\n\n\n serializer = JobsSerializers(jobs, many=True)\n return Response({\"jobs\":serializer.data, \"page\":page, \"pages\": paginator.num_pages})\n\n\ndef JobsViewsUpdate(pk, slug):\n jobs = Jobs.objects.get(pk=pk,slug=slug)\n jobs.views += 1\n jobs.save()\n\n@api_view(['GET'])\ndef JobsDetailList(request, pk, slug):\n jobs = Jobs.objects.get(pk=pk,slug=slug)\n JobsViewsUpdate(pk, slug)\n serializer = JobsSerializers(jobs, many=False)\n detail_object_viewed_signal.send(jobs.__class__, instance=jobs, request=request)\n return Response(serializer.data)\n\n\n@api_view(['POST'])\n@permission_classes([IsAuthenticated])\ndef JobsCreate(request):\n data = request.data\n current_user = request.user\n jobs = Jobs.objects.create(\n user =current_user,\n category=\"\",\n country='',\n state= '',\n address='',\n contact='',\n image= '',\n title= 'jobs',\n content=''\n )\n serializer = JobsSerializers(jobs, many=False)\n return Response(serializer.data)\n\n\n@api_view(['PUT'])\n@permission_classes([IsAuthenticated])\ndef JobsUpdate(request, pk, slug):\n data = request.data\n jobs = Jobs.objects.get(pk=pk, slug=slug)\n jobs.category =data['category']\n jobs.country = data['country']\n jobs.state = data['state']\n jobs.address = data['address']\n jobs.contact = data['contact']\n jobs.startDate=data['startDate']\n jobs.endDate=data['endDate']\n jobs.title = data['title']\n jobs.content = data['content']\n jobs.save()\n serializer = JobsSerializers(jobs, many=False)\n return 
Response(serializer.data)\n\n\n\n@api_view(['PUT'])\n@permission_classes([IsAdminUser])\ndef JobsAdminUpdate(request, pk):\n data = request.data\n jobs = Jobs.objects.get(pk=pk)\n jobs.category =data['category']\n jobs.country = data['country']\n jobs.state = data['state']\n jobs.address = data['address']\n jobs.contact = data['contact']\n # jobs.image=data['image']\n jobs.title = data['title']\n jobs.content = data['content']\n jobs.isApproved = data['isApproved']\n jobs.save()\n serializer = JobsSerializers(jobs, many=False)\n return Response(serializer.data)\n\n\n\n@api_view(['DELETE', 'GET'])\n@permission_classes([IsAuthenticated])\ndef JobsDelete(request, pk, slug):\n if request.method == 'GET':\n jobs = Jobs.objects.get(pk=pk, slug=slug)\n serializer = JobsSerializers(jobs, many=False)\n return Response(serializer.data)\n\n elif request.method == 'DELETE':\n jobs = Jobs.objects.get(pk=pk, slug=slug)\n jobs.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\n@api_view(['POST'])\ndef JobsImage(request):\n data = request.data\n\n product_id = data['product_id']\n jobs = Jobs.objects.get(id=product_id)\n\n jobs.image = request.FILES.get('image')\n jobs.save()\n\n serializer = JobsSerializers(jobs, many=False)\n \n return Response(serializer.data)\n\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef UserJobsList(request):\n current_user = request.user\n jobs = Jobs.objects.filter(user=current_user)\n serializer = JobsSerializers(jobs, many=True)\n return Response(serializer.data)", "repo_name": "31519/my_town_main", "sub_path": "productivity/views/jobs_views.py", "file_name": "jobs_views.py", "file_ext": "py", "file_size_in_byte": 4949, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "productivity.models.Jobs.objects.filter", "line_number": 19, "usage_type": "call"}, {"api_name": "productivity.models.Jobs.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "productivity.models.Jobs", "line_number": 19, "usage_type": "name"}, {"api_name": "rest_framework.pagination.PageNumberPagination", "line_number": 26, "usage_type": "call"}, {"api_name": "productivity.serializers.JobsSerializers", "line_number": 31, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 33, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 13, "usage_type": "call"}, {"api_name": "productivity.models.Jobs.objects.filter", "line_number": 43, "usage_type": "call"}, {"api_name": "productivity.models.Jobs.objects", "line_number": 43, "usage_type": "attribute"}, {"api_name": "productivity.models.Jobs", "line_number": 43, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 46, "usage_type": "call"}, {"api_name": "django.core.paginator.EmptyPage", "line_number": 49, "usage_type": "name"}, {"api_name": "django.core.paginator.PageNotAnInteger", "line_number": 51, "usage_type": "name"}, {"api_name": "productivity.serializers.JobsSerializers", "line_number": 60, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 61, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 36, "usage_type": "call"}, {"api_name": "productivity.models.Jobs.objects.get", "line_number": 65, "usage_type": "call"}, {"api_name": "productivity.models.Jobs.objects", "line_number": 65, "usage_type": "attribute"}, {"api_name": "productivity.models.Jobs", 
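The detail/update/delete views in this module call Jobs.objects.get(pk=pk, slug=slug) bare, so an unknown pk/slug pair raises Jobs.DoesNotExist and surfaces as an HTTP 500. A common guard is get_object_or_404; this is a sketch with a hypothetical view name, not the repo's code, and it assumes the Jobs and JobsSerializers imports already present in the module:

from django.shortcuts import get_object_or_404
from rest_framework.decorators import api_view
from rest_framework.response import Response

@api_view(['GET'])
def jobs_detail_or_404(request, pk, slug):
    # returns HTTP 404 instead of an unhandled DoesNotExist
    jobs = get_object_or_404(Jobs, pk=pk, slug=slug)
    return Response(JobsSerializers(jobs, many=False).data)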
"line_number": 65, "usage_type": "name"}, {"api_name": "productivity.models.Jobs.objects.get", "line_number": 71, "usage_type": "call"}, {"api_name": "productivity.models.Jobs.objects", "line_number": 71, "usage_type": "attribute"}, {"api_name": "productivity.models.Jobs", "line_number": 71, "usage_type": "name"}, {"api_name": "productivity.serializers.JobsSerializers", "line_number": 73, "usage_type": "call"}, {"api_name": "Dashboards.signals.detail_object_viewed_signal.send", "line_number": 74, "usage_type": "call"}, {"api_name": "Dashboards.signals.detail_object_viewed_signal", "line_number": 74, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 75, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 69, "usage_type": "call"}, {"api_name": "productivity.models.Jobs.objects.create", "line_number": 83, "usage_type": "call"}, {"api_name": "productivity.models.Jobs.objects", "line_number": 83, "usage_type": "attribute"}, {"api_name": "productivity.models.Jobs", "line_number": 83, "usage_type": "name"}, {"api_name": "productivity.serializers.JobsSerializers", "line_number": 94, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 95, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 78, "usage_type": "call"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 79, "usage_type": "call"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 79, "usage_type": "name"}, {"api_name": "productivity.models.Jobs.objects.get", "line_number": 102, "usage_type": "call"}, {"api_name": "productivity.models.Jobs.objects", "line_number": 102, "usage_type": "attribute"}, {"api_name": "productivity.models.Jobs", "line_number": 102, "usage_type": "name"}, {"api_name": "productivity.serializers.JobsSerializers", "line_number": 113, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 114, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 98, "usage_type": "call"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 99, "usage_type": "call"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 99, "usage_type": "name"}, {"api_name": "productivity.models.Jobs.objects.get", "line_number": 122, "usage_type": "call"}, {"api_name": "productivity.models.Jobs.objects", "line_number": 122, "usage_type": "attribute"}, {"api_name": "productivity.models.Jobs", "line_number": 122, "usage_type": "name"}, {"api_name": "productivity.serializers.JobsSerializers", "line_number": 133, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 134, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 118, "usage_type": "call"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 119, "usage_type": "call"}, {"api_name": "rest_framework.permissions.IsAdminUser", "line_number": 119, "usage_type": "name"}, {"api_name": "productivity.models.Jobs.objects.get", "line_number": 142, "usage_type": "call"}, {"api_name": "productivity.models.Jobs.objects", "line_number": 142, "usage_type": "attribute"}, {"api_name": "productivity.models.Jobs", "line_number": 142, "usage_type": "name"}, {"api_name": "productivity.serializers.JobsSerializers", "line_number": 143, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 144, 
"usage_type": "call"}, {"api_name": "productivity.models.Jobs.objects.get", "line_number": 147, "usage_type": "call"}, {"api_name": "productivity.models.Jobs.objects", "line_number": 147, "usage_type": "attribute"}, {"api_name": "productivity.models.Jobs", "line_number": 147, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 149, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_204_NO_CONTENT", "line_number": 149, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 149, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 138, "usage_type": "call"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 139, "usage_type": "call"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 139, "usage_type": "name"}, {"api_name": "productivity.models.Jobs.objects.get", "line_number": 157, "usage_type": "call"}, {"api_name": "productivity.models.Jobs.objects", "line_number": 157, "usage_type": "attribute"}, {"api_name": "productivity.models.Jobs", "line_number": 157, "usage_type": "name"}, {"api_name": "productivity.serializers.JobsSerializers", "line_number": 162, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 164, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 152, "usage_type": "call"}, {"api_name": "productivity.models.Jobs.objects.filter", "line_number": 171, "usage_type": "call"}, {"api_name": "productivity.models.Jobs.objects", "line_number": 171, "usage_type": "attribute"}, {"api_name": "productivity.models.Jobs", "line_number": 171, "usage_type": "name"}, {"api_name": "productivity.serializers.JobsSerializers", "line_number": 172, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 173, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 167, "usage_type": "call"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 168, "usage_type": "call"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 168, "usage_type": "name"}]} +{"seq_id": "18225681791", "text": "# -*- coding: utf-8 -*-\n#\n# datetime:2022/3/6 20:41\n\n\"\"\"\ndescription:train zigzag model start \n\"\"\"\nimport sys\nimport time\nimport os\nfrom tensorflow.keras.models import load_model\nimport argparse\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../')))\nfrom src.preprocess import load_data\nfrom src.preprocess.process_data import find_hard_examples\nfrom src.model.evaluation_model import evaluation_with_predict\nfrom src.model.train_model import TranModel\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\" # \"1,2,3\"\nos.environ['KERAS_BACKEND'] = 'tensorflow'\n\n# Training settings\nparser = argparse.ArgumentParser(description='tf zigzag')\nparser.add_argument('--modelKind', type=str, default='BGRU',\n help='modelKind')\nparser.add_argument('--predThreshold', type=float, default=0.4,\n help='Threshold')\nparser.add_argument('--epochTimes', type=str, default='30,2,2,2',\n help='step epochTimes')\nparser.add_argument('--learningRate', type=str, default='0.0012, 0.001, 0.0003, 0.0001', help='learningRate')\nparser.add_argument('--trainTimes', type=int, default=100,\n help='evaluation only option')\nargs = parser.parse_args()\n\n\ndef train_begin(model_path, train_times):\n \"\"\"\n train process 
and keep training in a loop\n    train_times: number of loop iterations (each iteration runs training steps 3.2.1, 3.2.2 and 3.3)\n    \"\"\"\n    if not os.path.exists(os.path.join(modelPath, model_name_list[0])):\n        mv_model_command = 'cp ' + ' ' + os.path.join(modelPathAll, modelKind,\n                                                      model_name_list[0]) + ' \\\"' + modelPath + '\\\"'\n        os.system(mv_model_command)\n        print(mv_model_command)\n    max_acc = 0\n    max_f_score = 0\n    max_stop_step = 7\n    stop_step = 0\n    stop_flag = False\n    model_last, i = load_data.return_last_count(\n        metricsFile, model_path, model_name_list)\n    if i == 0:\n        model_name = str(i + 1) + model_name_list[0]\n        evaluation_with_predict(\n            testDataSetPath, model_last, metricsFile, predThreshold, model_name)\n    if is_find_hard:\n        hard_file_name = 'hard-' + serialNumber\n        del_hard_file_command, hard_file_path = hard_name_and_path(\n            datePath, hard_file_name)\n    else:\n        hard_file_name = 'tigress'\n    while i < train_times:\n        print(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n        print('----------- loop times :' + str(i + 1) +\n              '---train begin------------------')\n        # 1\n        model_name = str(i + 1) + model_name_list[1]\n        model_last = trainModel.train_3_2_1(model_last, model_name)\n        print('model3.21 eval result ---------------------------------')\n        if is_find_hard:\n            os.makedirs(hard_file_path, exist_ok=True)\n            find_hard_examples(datePath, model_last,\n                               predThreshold, fileLen, hard_file_name)\n        acc, f_score = evaluation_with_predict(\n            testDataSetPath, model_last, metricsFile, predThreshold, model_name)\n        # stop early\n        if not stop_flag and max_acc > 0.78 and max_f_score > 0.67:\n            stop_flag = True\n        if stop_flag and stop_step >= max_stop_step and i > 12:\n            with open(metricsFile, 'a+') as fwrite:\n                fwrite.write(\n                    f\"stop early,max_f_1,{max_f_score},max_acc,{max_acc}\\n\")\n            break\n        if stop_flag and max_f_score > f_score:\n            stop_step = stop_step + 1\n        elif max_f_score < f_score or max_acc < acc:\n            stop_step = 0\n\n        max_acc = max(max_acc, acc)\n        max_f_score = max(max_f_score, f_score)\n\n        # 2\n        model_last2 = load_model(os.path.join(model_path, model_name))\n        model_name = str(i + 1) + model_name_list[2]\n        model_last = trainModel.train_3_2_2(\n            model_last, model_last2, model_name, hard_file_name)\n        print('model3.22 eval result')\n        evaluation_with_predict(\n            testDataSetPath, model_last, metricsFile, predThreshold, model_name)\n        # 3\n        model_name = str(i + 1) + model_name_list[3]\n        model_last = trainModel.train_3_3(model_last, model_name)\n        print('model3.3 eval result ')\n        evaluation_with_predict(\n            testDataSetPath, model_last, metricsFile, predThreshold, model_name)\n        if is_find_hard:\n            os.system(del_hard_file_command)\n        i = i + 1\n        print('----------- loop times: ' + str(i) +\n              '--train end ------------------')\n\n\ndef hard_name_and_path(date_path, hard_file_name):\n    # rm -rf /data1/yjy/dataset/zigzag/data-step-20/hard/*\n    hard_file_path = os.path.join(date_path, 'train', hard_file_name)\n    del_hard_file_command = 'rm -rf ' + hard_file_path\n    print(del_hard_file_command)\n    return del_hard_file_command, hard_file_path\n\n\n# /home/yjy/code/zigzag/zigzag03/zigzag/\n# /data1/yjy/dataset/zigzag/model/\nif __name__ == \"__main__\":\n    batchSize = 64\n    vectorDim = 40\n    maxLen = 500\n    dropout = 0.2\n    dataFileName = 'zigzag_vector_220720'\n    step_len = 40 # step_len\n    datePath = os.path.join('/data1/yjy/dataset/', dataFileName) # /data1/yjy/dataset/zigzag_vector_220720/\n    modelPathAll = \"/data1/yjy/dataset/zigzag/model\"\n    resultPath = '/data1/yjy/dataset/zigzag/result' # result save path\n    model_name_list = ['model-3.1.h5', 'model-3.21.h5',\n                       
'model-3.22.h5', 'model-3.3.h5']\n is_find_hard = False\n fileLen = 320\n modelKind = args.modelKind # select model\n predThreshold = args.predThreshold # Threshold\n epochTimes = [int(item) for item in args.epochTimes.split(',')]\n learningRate = [float(item) for item in args.learningRate.split(',')]\n trainTimes = args.trainTimes\n serialNumber = f'{dataFileName}-epochTimes-{epochTimes}-learningRate-{learningRate}-modelKind-{modelKind}-predThreshold-{predThreshold}-is_find_hard-{str(is_find_hard)}'\n modelPath = os.path.join(modelPathAll, modelKind, serialNumber)\n resultPath = os.path.join(resultPath, modelKind, serialNumber)\n os.makedirs(modelPath, exist_ok=True)\n os.makedirs(resultPath, exist_ok=True)\n testDataSetPath = os.path.join(datePath, 'test')\n metricsFileName = serialNumber + '.csv'\n metricsFile = os.path.join(resultPath, metricsFileName)\n print(modelPath)\n trainModel = TranModel(modelKind, datePath, modelPath, resultPath, batchSize, maxLen, vectorDim, dropout,\n serialNumber, predThreshold, epochTimes, learningRate, fileLen, step_len)\n print(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n modelLast = trainModel.train_3_1(model_name_list[0])\n # evaluation_with_predict(testDataSetPath, modelLast,\n # metricsFile, predThreshold, 'model-3.1.h5')\n # train_begin(modelPath, trainTimes)\n print(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n print('-----------train end -----------')\n", "repo_name": "ZigZagframework/zigzag_framework", "sub_path": "ZigZag/ZigZag/ZigZag-Framework/train_model/zigzag/src/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 6989, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "42", "api": [{"api_name": "sys.path.append", "line_number": 13, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 13, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 21, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 45, "usage_type": "call"}, {"api_name": "src.preprocess.load_data.return_last_count", "line_number": 52, "usage_type": "call"}, {"api_name": "src.preprocess.load_data", "line_number": 52, "usage_type": "name"}, {"api_name": "src.model.evaluation_model.evaluation_with_predict", "line_number": 56, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 65, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 65, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 73, "usage_type": "call"}, {"api_name": "src.preprocess.process_data.find_hard_examples", "line_number": 74, "usage_type": "call"}, {"api_name": 
"src.model.evaluation_model.evaluation_with_predict", "line_number": 76, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "src.model.evaluation_model.evaluation_with_predict", "line_number": 100, "usage_type": "call"}, {"api_name": "src.model.evaluation_model.evaluation_with_predict", "line_number": 106, "usage_type": "call"}, {"api_name": "os.system", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path", "line_number": 117, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path", "line_number": 132, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path", "line_number": 145, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 146, "usage_type": "call"}, {"api_name": "os.path", "line_number": 146, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 147, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path", "line_number": 149, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path", "line_number": 151, "usage_type": "attribute"}, {"api_name": "src.model.train_model.TranModel", "line_number": 153, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 155, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 155, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 160, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 160, "usage_type": "call"}]} +{"seq_id": "72356934207", "text": "import enum\nimport random\n\nimport tutorials.chapter_07_classes.backtracking.stack_queue_impl as collections\n\n\nclass Direction(enum.Enum):\n UP = (-1, 0)\n LEFT = (0, -1)\n RIGHT = (0, 1)\n DOWN = (1, 0)\n\nclass SlidingBlock:\n def __init__(self, name, x_coord, y_coord, length, width):\n self.name = name\n self.x_coord = x_coord\n self.y_coord = y_coord\n self.length = length\n self.width = width\n\n self.occupied = [] # cache for performance\n\n def move(self, direction):\n self.x_coord += direction.value[0]\n self.y_coord += direction.value[1]\n\n self.occupied = []\n\n def border(self):\n if not self.occupied:\n self.occupied = [(self.x_coord + i, self.y_coord + j) \n for i in range(self.length) for j in range(self.width) \n if i == 0 or i + 1 == self.length or j == 0 or j + 1 == self.width\n ]\n return self.occupied\n\n def is_occupied(self, x, y):\n return self.x_coord <= x < self.x_coord + self.length and self.y_coord <= y < self.y_coord + self.width\n\n def __str__(self):\n return '(name={}, x_coord={}, y_coord={}, length={}, width={})'.format(\n self.name, self.x_coord, self.y_coord, self.length, self.width)\n\n def __repr__(self):\n return 'SlidingBlock(name={}, x_coord={}, y_coord={}, length={}, width={})'.format(\n self.name, self.x_coord, self.y_coord, self.length, self.width)\n\n\nclass SlidingGameBoard:\n def __init__(self, length, width, sliding_blocks: list, finish_criteria, shape_to_num = None, steps = None):\n self.length = length\n self.width = width\n 
self.sliding_blocks = sliding_blocks\n        self.finish_criteria = finish_criteria\n\n        self.name_to_block = {x.name: x for x in sliding_blocks}\n        self.shape_to_num = shape_to_num if shape_to_num else {shape:i+1 for i, shape in enumerate(set([(block.length, block.width) for block in sliding_blocks]))}\n        self.steps = steps if steps else []\n        self.layout = None\n\n    def get_layout(self):\n        if not self.layout:\n            self.layout = [[0] * self.width for _ in range(self.length)]\n            for b in self.sliding_blocks:\n                spaces = b.border()\n                for (x, y) in spaces:\n                    self.layout[x][y] = self.shape_to_num[(b.length, b.width)]\n\n        return self.layout\n\n    def hash_board(self, zobrist_tbl):\n        hb = self.get_layout()\n\n        ret = 0\n        for i in range(self.length):\n            for j in range(self.width):\n                ret ^= zobrist_tbl[i][j][hb[i][j]]\n\n        # we could also hash the symmetric board as well if we knew the board/door location were symmetric,\n        # but the rush hour game is not symmetric, so we leave this optimization out.\n        return ret\n\n    # since each move is just one step in one direction, collision can be detected by borders\n    def is_movable(self, direction, block):\n        layout = self.get_layout()\n        for x,y in block.border():\n            x += direction.value[0]\n            y += direction.value[1]\n            if x<0 or x >= self.length or y<0 or y>=self.width:\n                return False\n            if not block.is_occupied(x, y) and layout[x] and layout[x][y] and layout[x][y] != 0:\n                return False\n        return True\n\n    def move(self, block_name, direction):\n        block = self.name_to_block[block_name]\n        block.move(direction)\n        self.steps.append((block.name, block.x_coord, block.y_coord, direction.name))\n\n    def __str__(self):\n        return str(self.steps) + ', ' + str(self.layout)\n\n    def __repr__(self): # without this, during debug, a list of this class does not show as str.\n        return str(self.steps) + ', ' + str(self.layout)\n\n    def deep_clone(self):\n        # copy.deepcopy and pickle are slow since we have internal states\n        # return pickle.loads(pickle.dumps(self))\n        return SlidingGameBoard(self.length, self.width,\n                                [SlidingBlock(b.name, b.x_coord, b.y_coord, b.length, b.width) for b in self.sliding_blocks],\n                                self.finish_criteria,\n                                self.shape_to_num,\n                                self.steps.copy())\n\ndef solve(board: SlidingGameBoard):\n    print(\"board shape: len={}, wid={}\".format(board.length, board.width))\n\n    zobrist_tbl = [[[random.randint(1, 2**64 - 1)\n                     for _ in range(len(board.shape_to_num)+1)] # 1 for empty space case\n                    for _ in range(board.width)] for _ in range(board.length)]\n    dirs = list(Direction) # avoid regenerating the list every time we loop over all directions\n    results = []\n    cache = {board.hash_board(zobrist_tbl)} # filter out explored cases, cut branches\n    queue = collections.MyQueue() # this means breadth-first search (BFS), finds shortest paths\n    queue.enqueue(board)\n    while queue:\n        board = queue.dequeue()\n        print('queue size={}, steps={}'.format(len(queue), len(board.steps)))\n\n        for block in board.sliding_blocks:\n            for d in dirs: # loop through UP, DOWN, LEFT and RIGHT in Direction\n                if board.is_movable(d, block):\n                    move_to(d, block.name, board, cache, queue, results, zobrist_tbl)\n\n    return results\n\n\ndef move_to(direction, block_name, board, cache, queue, results, zobrist_tbl):\n    new_board = board.deep_clone()\n    new_board.move(block_name, direction)\n\n    if new_board.finish_criteria(new_board): # if it is a solution, add to result and stop\n        results.append(new_board)\n    else:\n        hb = new_board.hash_board(zobrist_tbl)\n        if hb not in cache: # check if it is already dealt with. If not, add it as a new case.\n            cache.add(hb)\n            queue.enqueue(new_board)\n\n\n\n# https://github.com/jeantimex/klotski\n# https://www.jianshu.com/p/4a77d6253d33\n# https://inst.eecs.berkeley.edu/~cs61c/fa14/projs/02/\n# https://www.ixueshu.com/document/83b73e57dd554a55100410a5ba28df48318947a18e7f9386.html\n\n# https://stackify.com/how-to-use-python-profilers-learn-the-basics/\n# https://pythonspeed.com/articles/beyond-cprofile/\n", "repo_name": "bigfrog10/python-tutorial", "sub_path": "src/tutorials/chapter_07_classes/backtracking/klotski.py", "file_name": "klotski.py", "file_ext": "py", "file_size_in_byte": 6140, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "42", "api": [{"api_name": "enum.Enum", "line_number": 7, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 118, "usage_type": "call"}, {"api_name": "tutorials.chapter_07_classes.backtracking.stack_queue_impl.MyQueue", "line_number": 124, "usage_type": "call"}, {"api_name": "tutorials.chapter_07_classes.backtracking.stack_queue_impl", "line_number": 124, "usage_type": "name"}]}
+{"seq_id": "10061603973", "text": "import cv2\nimport numpy as np\n\nfrom .colors import comparator_Berlin_Kay_lab, sRGB_to_lab\n\n\ndef cluster_with_distance(pixels, comparator):\n    matrix = np.zeros(shape=(pixels.shape[0], len(comparator)))\n    for i, values in enumerate(comparator.values()):\n        matrix[:, i] = np.linalg.norm(pixels - values, ord=2, axis=1)\n    labels, counts = np.unique(np.argmin(matrix, axis=1), return_counts=True)\n    indices = np.argsort(counts)[::-1]\n    labels = np.take_along_axis(labels, indices, axis=0)\n    counts = np.take_along_axis(counts, indices, axis=0)\n    labels = np.array(list(comparator.keys()))[labels]\n    percentages = np.array(counts) / pixels.shape[0]\n    return labels, percentages\n\n\ndef eval_distance_to_colors(image, mask, n=5):\n    \"\"\"\n    return the n closest colors within the masked region of the image\n\n    image: np.array of shape (h, w, 3)\n    mask: np.array\n    \"\"\"\n    mask_size = image.shape[:2]\n    img = sRGB_to_lab(image / 255)\n    img = img.reshape(-1, 3)\n\n    # select only pixels that are in the mask\n    binary_mask = segment2mask(\n        mask,\n        mask_shape=mask_size,\n    ).reshape(-1)\n    to_keep = np.where(binary_mask == 1)[0]\n    img = img[to_keep]\n    # compute closest colors\n    labels, percentages = cluster_with_distance(img, comparator_Berlin_Kay_lab)\n    return labels[:n], percentages[:n]\n\n\ndef segment2mask(segment, mask_shape, normalized=False):\n    \"\"\"\n    Convert a segment (list of (x,y) points) to a binary mask of the specified shape.\n    Args:\n        segment (np.ndarray): the segment mask as a numpy array of shape (n,2)\n        mask_shape (tuple): the desired shape of the output binary mask as a tuple of (height, width)\n        normalized (bool): whether the segment is normalized to [0, 1] or not\n    Returns:\n        mask (np.ndarray): the binary mask as a numpy array of shape (height, width)\n    \"\"\"\n    h, w = mask_shape\n    if normalized:\n        coords = np.round(segment * np.array([w, h])).astype(int)\n    else:\n        coords = segment.astype(int)\n    mask = np.zeros(mask_shape, dtype=np.uint8)\n    if len(coords) > 0:\n        # Fill the mask\n        cv2.fillPoly(mask, [coords], 255)\n        mask[mask == 255] = 1\n    return mask\n", "repo_name": "grimalPaul/TIAM", "sub_path": "src/tiam/attribute_binding.py", "file_name": "attribute_binding.py", "file_ext": "py", "file_size_in_byte": 2175, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 13, "dataset": "github-code", "pt": "33", "api": [{"api_name": "numpy.zeros", 
"line_number": 8, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 10, "usage_type": "attribute"}, {"api_name": "numpy.unique", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.take_along_axis", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.take_along_axis", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 16, "usage_type": "call"}, {"api_name": "colors.sRGB_to_lab", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 36, "usage_type": "call"}, {"api_name": "colors.comparator_Berlin_Kay_lab", "line_number": 39, "usage_type": "argument"}, {"api_name": "numpy.round", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 58, "usage_type": "attribute"}, {"api_name": "cv2.fillPoly", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "5819173019", "text": "import base64\nimport inspect\nimport logging\nimport os\nimport re\nimport time\nfrom typing import Optional, List\n\nimport discord\nimport requests\nimport ujson\n\nimport db\nimport game_sync\nimport gen_token\nimport main\nimport utils\nfrom utils import plural\n\n\nreport_commands = set()\ncommand_functions = {}\n\n\nasync def handle(message: discord.Message):\n log.info(f\"Recieved DM from {utils.detailed_user(message)}: \\\"{message.content}\\\"\")\n command_name = message.content.partition(' ')[0].lower()\n\n if command_name in command_functions:\n log.info(f\"Handling '{command_name}' command\")\n await report_command_used(command_name, message)\n await command_functions[command_name](message)\n elif message.content.lower() == 'ok':\n await message.channel.send(message.content)\n else:\n await message.channel.send(\"Unrecognized command, try `help`\")\n\n\ndef command(format_regex: Optional[re.Pattern] = None, report_usage: bool = False):\n def outer(func: callable):\n command_name = func.__name__.removeprefix('command_')\n use_message_split = 'message_split' in inspect.signature(func).parameters\n\n if report_usage:\n report_commands.add(command_name)\n\n try:\n command_doc = func.__doc__.replace('\\n ', '\\n')\n func.help = f\"```\\n{command_doc}```\"\n except AttributeError:\n log.error(f\"{func.__name__} has no docstring\")\n\n async def inner(message: discord.Message):\n if format_regex and not format_regex.match(message.content):\n log.warning(\"Bad command format\")\n await message.channel.send(f\"Incorrect command format.\\n{func.help}\")\n return\n\n if use_message_split:\n return await func(message, re_command_split.split(message.content))\n else:\n return await func(message)\n\n inner.help = func.help\n command_functions[command_name] = inner\n return inner\n\n return outer\n\n\n@command()\nasync def command_help(message: discord.Message, message_split: List[str]):\n \"\"\"\n help COMMAND\n\n COMMAND: The command to get the parameter info for (optional)\n \"\"\"\n\n if len(message_split) > 1 and message_split[1] in command_functions:\n await message.channel.send(command_functions[message_split[1]].help)\n else:\n add_bot_link = 
discord.utils.oauth_url('970375635027525652', permissions=discord.Permissions(2147560512), scopes=('bot',))\n commands_available = '\\n'.join(command_functions)\n\n response = \"Alright, looks you want to add your TAS project to this bot (or are just curious about what the help command says). Awesome! So, steps:\" \\\n \"\\n\\n1. Register GitHub app with your account and repo (you likely need to be the repo owner): \" \\\n \"\" \\\n f\"\\n2. Add bot to your server: <{add_bot_link}>\" \\\n \"\\n3. Run the `register_project` command, see `help register_project` for parameters. You can also use this to edit existing projects.\" \\\n \"\\n4. (Optional) Add other admins with `edit_admins`, and add mod(s) for sync testing with `add_mods`.\" \\\n \"\\n\\nAvailable commands:\" \\\n f\"\\n```\\n{commands_available}```\"\n\n await message.channel.send(response)\n\n\n@command(re.compile(r'(?i)register_project .+ \\d+ .+/.+ .+ [YN] [YN] [YN] [YN]'), report_usage=True)\nasync def command_register_project(message: discord.Message, message_split: List[str]):\n \"\"\"\n register_project NAME IMPROVEMENTS_CHANNEL_ID REPOSITORY ACCOUNT COMMIT_DRAFTS IS_LOBBY ENSURE_LEVEL DO_SYNC_CHECK\n\n NAME: The name of the project (in quotes if needed), ex: \"Into the Jungle\", \"Strawberry Jam\", \"Celeste maingame\", \"Celeste mindash\"\n IMPROVEMENTS_CHANNEL_ID: Turn on developer mode in Discord advanced settings, then right click the channel and click Copy ID\n REPOSITORY: Either as OWNER/REPO, or as OWNER/REPO/PROJECT if you have multiple projects in a repo\n ACCOUNT: Your GitHub account name\n COMMIT_DRAFTS: Automatically commit drafts to the root directory [Y or N]\n IS_LOBBY: Whether this channel is for a lobby, which handles file validation differently [Y or N]\n ENSURE_LEVEL: Whether to make sure the level's name is in the message when validating a posted file [Y or N]\n DO_SYNC_CHECK: Do a nightly sync test of all your files by actually running the game (highly recommended) [Y or N]\n \"\"\"\n\n log.info(\"Verifying project\")\n await message.channel.send(\"Verifying...\")\n _, name, improvements_channel_id, repo_and_subdir, github_account, commit_drafts, is_lobby, ensure_level, do_run_validation = message_split\n improvements_channel_id = int(improvements_channel_id)\n projects = db.projects.dict()\n editing = improvements_channel_id in projects\n\n if editing:\n if not await is_admin(message, projects[improvements_channel_id]):\n return\n\n log.info(\"This project already exists, preserving some keys\")\n preserved_keys = ('install_time', 'pin', 'mods', 'last_run_validation', 'admins', 'desyncs', 'filetimes')\n previous = {key: projects[improvements_channel_id][key] for key in preserved_keys}\n\n # verify improvements channel exists\n improvements_channel = client.get_channel(improvements_channel_id)\n if not improvements_channel:\n error = f\"Channel {improvements_channel_id} doesn't exist\"\n log.error(error)\n await message.channel.send(error)\n return\n\n # verify needed permissions in improvements channel\n missing_permissions = utils.missing_channel_permissions(improvements_channel)\n if missing_permissions:\n error = f\"Don't have {missing_permissions[0]} permission for #{improvements_channel.name} ({improvements_channel_id})\"\n log.error(error)\n await message.channel.send(error)\n return\n\n # verify github account exists\n r = requests.get(f'https://api.github.com/users/{github_account}', headers={'Accept': 'application/vnd.github.v3+json'})\n if r.status_code != 200:\n log.error(f\"GitHub 
account {github_account} doesn't seem to exist, status code is {r.status_code}\")\n await message.channel.send(f\"GitHub account \\\"{github_account}\\\" doesn't seem to exist\")\n return\n\n # verify app is installed\n try:\n main.generate_request_headers(github_account)\n except gen_token.InstallationOwnerMissingError as missing_installation_owner:\n log.error(f\"GitHub account {missing_installation_owner} doesn't have the app installed\")\n await message.channel.send(f\"GitHub account {missing_installation_owner} doesn't have the app installed, please do so here: https://github.com/apps/celestetas-improvements-tracker\")\n return\n\n # verify repo exists\n repo_split = repo_and_subdir.rstrip('/').split('/')\n repo, subdir = '/'.join(repo_split[:2]), '/'.join(repo_split[2:])\n r = requests.get(f'https://api.github.com/repos/{repo}', headers={'Accept': 'application/vnd.github.v3+json'})\n if r.status_code != 200:\n log.error(f\"Repo {repo} doesn't seem to publically exist, status code is {r.status_code}\")\n await message.channel.send(f\"Repo \\\"{repo}\\\" doesn't seem to publically exist\")\n return\n\n # verify subdir exists in repo\n if subdir:\n r = requests.get(f'https://api.github.com/repos/{repo}/contents/{subdir}', headers={'Accept': 'application/vnd.github.v3+json'})\n if r.status_code != 200 or 'type' in ujson.loads(r.content):\n log.error(f\"Directory {subdir} doesn't seem to exist in repo {repo}, status code is {r.status_code}\")\n await message.channel.send(f\"Directory \\\"{subdir}\\\" doesn't seem to exist in \\\"{repo}\\\"\")\n return\n\n # verify not adding run validation to a lobby\n if do_run_validation.lower() == 'y' and is_lobby.lower() == 'y':\n log.error(\"Can't add run validation to a lobby project\")\n await message.channel.send(\"Enabling run validation for a lobby project is not allowed\")\n return\n\n log.info(\"Verification successful\")\n\n current_time = int(time.time())\n registered_project = {'name': name.replace('\"', ''),\n 'repo': repo,\n 'installation_owner': github_account,\n 'admins': (message.author.id,),\n 'install_time': current_time,\n 'commit_drafts': commit_drafts.lower() == 'y',\n 'is_lobby': is_lobby.lower() == 'y',\n 'ensure_level': ensure_level.lower() == 'y',\n 'do_run_validation': do_run_validation.lower() == 'y',\n 'last_run_validation': None,\n 'pin': None,\n 'subdir': subdir,\n 'mods': [],\n 'desyncs': [],\n 'last_commit_time': current_time,\n 'filetimes': {},\n 'sync_check_timed_out': False}\n\n if not editing:\n await message.channel.send(\"Generating path cache...\")\n main.generate_path_cache(improvements_channel_id, registered_project)\n pinned_message = await main.edit_pin(improvements_channel, create_from_project=registered_project)\n await pinned_message.pin()\n registered_project['pin'] = pinned_message.id\n db.project_logs.set(improvements_channel_id, [])\n else:\n for previous_key in previous:\n registered_project[previous_key] = previous[previous_key]\n\n await main.edit_pin(improvements_channel)\n\n db.projects.set(improvements_channel_id, registered_project)\n main.fast_project_ids.add(improvements_channel_id)\n project_added_log = f\"{'Edited' if editing else 'Added'} project {improvements_channel_id}: {registered_project}\"\n log.info(project_added_log)\n db.history_log.set(utils.log_timestamp(), project_added_log)\n\n if editing:\n await message.channel.send(\"Successfully verified and edited project.\")\n else:\n add_mods_text = \" Since you are doing sync checking, be sure to add mods (if need be) with the command 
`add_mods`.\" if do_run_validation.lower() == 'y' else \"\"\n await message.channel.send(\"Successfully verified and added project! If you want to change your project's settings, \"\n f\"run the command again and it will overwrite what was there before.{add_mods_text}\")\n\n\n@command(re.compile(r'(?i)add_mods .+ .+'), report_usage=True)\nasync def command_add_mods(message: discord.Message, message_split: List[str]):\n \"\"\"\n add_mods PROJECT_NAME MODS\n\n PROJECT_NAME: The name of your project (in quotes if needed). If you have multiple improvement channels with the same project name, this will update all of them\n MODS: The mod(s) used by your project, separated by spaces (dependencies are automatically handled). Ex: EGCPACK, WinterCollab2021, conquerorpeak103\n \"\"\"\n\n project_search_name = message_split[1].replace('\"', '')\n project_mods_added = False\n\n for project in db.projects.get_by_name(project_search_name):\n if not await is_admin(message, project):\n break\n elif not project['do_run_validation']:\n log.warning(f\"Trying to add mods to project: {project['name']}, but run validation is disabled\")\n await message.channel.send(f\"Project \\\"{project['name']}\\\" has sync checking disabled\")\n continue\n\n log.info(f\"Adding mods for project: {project['name']}\")\n project_mods_added = True\n mods_given = [mod.removesuffix('.zip') for mod in message_split[2:]]\n project_mods = set(project['mods'])\n log.info(f\"{len(project_mods)} mod{plural(project_mods)} before adding: {project_mods}\")\n project_mods = project_mods.union(mods_given)\n log.info(f\"{len(project_mods)} mod{plural(project_mods)} after adding: {project_mods}\")\n project['mods'] = list(project_mods)\n db.projects.set(project['project_id'], project)\n mods_missing = set()\n\n for mod_given in mods_given:\n all_project_mods = project_mods.union(game_sync.get_mod_dependencies(mod_given))\n\n log.info(f\"{len(all_project_mods)} total mod{plural(all_project_mods)}: {all_project_mods}\")\n installed_mods = [item.removesuffix('.zip') for item in os.listdir(game_sync.mods_dir()) if item.endswith('.zip')]\n\n for mod in all_project_mods:\n if mod not in installed_mods:\n mods_missing.add(mod)\n\n await message.channel.send(f\"Project \\\"{project['name']}\\\" now has {len(all_project_mods)} mod{plural(all_project_mods)} to load for sync testing\")\n\n if mods_missing:\n log.warning(f\"Missing {len(mods_missing)} mod(s) from installed: {mods_missing}\")\n mods_missing_formatted = '\\n'.join(sorted(mods_missing))\n await (await client.fetch_user(219955313334288385)).send(f\"hey you need to install some mods for sync testing\\n```\\n{mods_missing_formatted}```\")\n await message.channel.send(f\"The following mod(s) are not currently prepared for sync testing (Kataiser has been automatically DM'd about it):\\n```\\n{mods_missing_formatted}```\")\n\n if not project_mods_added:\n log.warning(f\"No projects found matching: {project_search_name}\")\n await message.channel.send(\"No projects (with sync checking enabled) matching that name found\")\n\n\n@command(re.compile(r'(?i)rename_file .+ .+\\.tas .+\\.tas'), report_usage=True)\nasync def command_rename_file(message: discord.Message, message_split: List[str]):\n \"\"\"\n rename_file PROJECT_NAME FILENAME_BEFORE FILENAME_AFTER\n\n PROJECT_NAME: The name of your project (in quotes if needed). 
If you have multiple improvement channels with the same project name, this will search in all of them\n FILENAME_BEFORE: The current name of the TAS file you want to rename (with .tas)\n FILENAME_AFTER: What you want the TAS file to be renamed to (with .tas)\n \"\"\"\n\n project_search_name = message_split[1].replace('\"', '')\n filename_before, filename_after = message_split[2:]\n renamed_file = False\n\n if filename_before == filename_after:\n await message.channel.send(\"what\")\n return\n\n for project in db.projects.get_by_name(project_search_name):\n main.generate_request_headers(project['installation_owner'])\n path_cache = main.generate_path_cache(project['project_id'])\n\n if filename_before not in path_cache:\n not_found_text = f\"{filename_before} not in project {project['name']}\"\n log.warning(not_found_text)\n await message.channel.send(not_found_text)\n return\n\n renaming_text = f\"Renaming `{filename_before}` to `{filename_after}` in project \\\"{project['name']}\\\"\"\n log.info(renaming_text)\n await message.channel.send(renaming_text)\n repo = project['repo']\n file_path = path_cache[filename_before]\n renamed_file = True\n user_github_account = utils.get_user_github_account(message.author.id)\n\n log.info(f\"Downloading {filename_before}\")\n r = requests.get(f'https://api.github.com/repos/{repo}/contents/{file_path}', headers=main.headers)\n utils.handle_potential_request_error(r, 200)\n tas_downloaded = base64.b64decode(ujson.loads(r.content)['content'])\n\n # commit 1: delete old file\n log.info(\"Performing delete commit\")\n data = {'message': f\"Renamed {filename_before} to {filename_after} (deleting)\", 'sha': main.get_sha(repo, file_path)}\n if user_github_account:\n data['author'] = {'name': user_github_account[0], 'email': user_github_account[1]}\n log.info(f\"Setting commit author to {data['author']}\")\n r = requests.delete(f'https://api.github.com/repos/{repo}/contents/{file_path}', headers=main.headers, data=ujson.dumps(data))\n utils.handle_potential_request_error(r, 200)\n time.sleep(1) # just to be safe\n\n # commit 2: create new file (or overwrite)\n log.info(\"Performing recreate commit\")\n file_path_after = file_path.replace(filename_before, filename_after)\n data = {'message': f\"Renamed {filename_before} to {filename_after} (creating)\", 'content': base64.b64encode(tas_downloaded).decode('UTF8')}\n if filename_after in path_cache:\n data['sha'] = main.get_sha(repo, file_path_after)\n expected_status = 200\n else:\n expected_status = 201\n if user_github_account:\n data['author'] = {'name': user_github_account[0], 'email': user_github_account[1]}\n log.info(f\"Setting commit author to {data['author']}\")\n r = requests.put(f'https://api.github.com/repos/{repo}/contents/{file_path_after}', headers=main.headers, data=ujson.dumps(data))\n utils.handle_potential_request_error(r, expected_status)\n\n if r.status_code == expected_status:\n db.path_caches.enable_cache()\n db.path_caches.remove_file(project['project_id'], filename_before)\n db.path_caches.add_file(project['project_id'], filename_after, file_path_after)\n db.path_caches.disable_cache()\n log.info(\"Rename successful\")\n await message.channel.send(\"Rename successful\")\n improvements_channel = client.get_channel(project['project_id'])\n await improvements_channel.send(f\"{message.author.mention} renamed `{filename_before}` to `{filename_after}`\")\n await main.edit_pin(improvements_channel)\n else:\n log.error(\"Rename unsuccessful\")\n await message.channel.send(\"Rename 
unsuccessful\")\n\n if not renamed_file:\n log.warning(\"No files renamed\")\n await message.channel.send(f\"{filename_before} not found in any project named {project_search_name}\")\n\n\n@command(re.compile(r'(?i)edit_admin .+ \\d+'), report_usage=True)\nasync def command_edit_admin(message: discord.Message, message_split: List[str]):\n \"\"\"\n edit_admin PROJECT_NAME ADMIN_ID ADDING\n\n PROJECT_NAME: The name of your project (in quotes if needed). If you have multiple improvement channels with the same project name, this will search in all of them\n ADMIN_ID: The Discord ID (not the username) of the user you're adding or removing\n ADDING: Y if adding admin, N if removing admin\n \"\"\"\n\n project_search_name = message_split[1].replace('\"', '')\n admin_id = int(message_split[2])\n adding = message_split[3].lower() == 'y'\n\n for project in db.projects.get_by_name(project_search_name):\n if not await is_admin(message, project):\n continue\n\n try:\n new_admin = await client.fetch_user(admin_id)\n except discord.NotFound:\n log.error(f\"User {admin_id} not found\")\n await message.channel.send(f\"User with ID {admin_id} could not be found\")\n return\n\n if adding:\n if admin_id in project['admins']:\n already_admin = f\"{utils.detailed_user(user=new_admin)} is already an admin for project \\\"{project['name']}\\\".\"\n log.warning(already_admin)\n await message.channel.send(already_admin)\n else:\n project['admins'].append(admin_id)\n db.projects.set(project['project_id'], project)\n added_admin = f\"Added {utils.detailed_user(user=new_admin)} as an admin to project \\\"{project['name']}\\\".\"\n log.info(added_admin)\n await message.channel.send(added_admin)\n await new_admin.send(f\"{message.author.global_name} has added you as an admin to the \\\"{project['name']}\\\" TAS project.\")\n await main.edit_pin(client.get_channel(project['project_id']))\n else:\n if admin_id in project['admins']:\n project['admins'].remove(admin_id)\n db.projects.set(project['project_id'], project)\n removed_admin = f\"Removed {utils.detailed_user(user=new_admin)} as an admin from project \\\"{project['name']}\\\".\"\n log.info(removed_admin)\n await message.channel.send(removed_admin)\n await new_admin.send(f\"{message.author.global_name} has removed you as an admin from the \\\"{project['name']}\\\" TAS project.\")\n await main.edit_pin(client.get_channel(project['project_id']))\n else:\n not_admin = f\"{utils.detailed_user(user=new_admin)} is not an admin for project \\\"{project['name']}\\\".\"\n log.warning(not_admin)\n await message.channel.send(not_admin)\n\n\n@command()\nasync def command_about(message: discord.Message):\n \"\"\"\n about\n\n (No parameters)\n \"\"\"\n\n text = \"Source: \" \\\n \"\\nProjects (improvement channels): {0}\" \\\n \"\\nServers: {1}\" \\\n \"\\nGithub installations: {2}\" \\\n \"\\nCurrent uptime: {3} hours\" \\\n \"\\nNightly sync check: {4} project{6}\" \\\n \"\\nImprovements/drafts processed and committed: {5}\"\n\n sync_checks = 0\n installations = set()\n\n for project in db.projects.get_all(consistent_read=False):\n installations.add(project['installation_owner'])\n\n if project['do_run_validation']:\n sync_checks += 1\n\n text_out = text.format(main.projects_count(),\n len(client.guilds),\n len(installations),\n round((time.time() - main.login_time) / 3600, 1),\n sync_checks,\n db.history_log.size(), # techically inaccurate because add/edit project logs but close enough\n plural(sync_checks))\n\n log.info(text_out)\n await 
message.channel.send(text_out)\n\n\n@command(re.compile(r'(?i)about_project .+'))\nasync def command_about_project(message: discord.Message, message_split: List[str]):\n \"\"\"\n about PROJECT_NAME\n\n PROJECT_NAME: The name of your project (in quotes if needed). If you have multiple improvement channels with the same project name, this will show info for all of them\n \"\"\"\n\n # message_split = re_command_split.split(message.content)\n project_search_name = message_split[1].replace('\"', '')\n found_matching_project = False\n text = \"Name: **{0}**\" \\\n \"\\nRepo: <{1}>\" \\\n \"\\nImprovement channel: <#{2}>\" \\\n \"\\nAdmin{12}: {3}\" \\\n \"\\nGithub installation owner: {4}\" \\\n \"\\nInstall time: \" \\\n \"\\nPin: <{6}>\" \\\n \"\\nCommit drafts: `{7}`\" \\\n \"\\nIs lobby: `{8}`\" \\\n \"\\nEnsure level name in posts: `{9}`\" \\\n \"\\nDo sync check: `{10}`\" \\\n \"{11}\"\n\n for project in db.projects.get_by_name(project_search_name):\n if project['do_run_validation']:\n last_run = project['last_run_validation']\n\n if last_run:\n last_sync_check = f\"\\nLast sync check: \"\n else:\n last_sync_check = \"\\nLast sync check: `Not yet run`\"\n else:\n last_sync_check = \"\"\n\n repo = project['repo']\n subdir = project['subdir']\n admins = [utils.detailed_user(user=await client.fetch_user(admin)) for admin in project['admins']]\n text_out = text.format(project['name'],\n f'https://github.com/{repo}/tree/HEAD/{subdir}' if subdir else f'https://github.com/{repo}',\n project['project_id'],\n ', '.join(admins),\n project['installation_owner'],\n project['install_time'],\n client.get_channel(project['project_id']).get_partial_message(project['pin']).jump_url,\n project['commit_drafts'],\n project['is_lobby'],\n project['ensure_level'],\n project['do_run_validation'],\n last_sync_check,\n plural(admins))\n\n log.info(text_out)\n await message.channel.send(text_out)\n found_matching_project = True\n\n if not found_matching_project:\n log.info(\"Found no matching projects\")\n await message.channel.send(f\"Found no projects matching that name\")\n\n\n# verify that the user editing the project is an admin (or Kataiser)\nasync def is_admin(message: discord.Message, project: dict):\n if message.author.id in (*project['admins'], 219955313334288385):\n return True\n else:\n log.warning(\"Not project admin\")\n await message.channel.send(\"Not allowed, you are not a project admin\")\n return False\n\n\n# DM Kataiser when an important command is used\nasync def report_command_used(command_name: str, message: discord.Message):\n try:\n if command_name in report_commands and message.author.id != 219955313334288385:\n await (await client.fetch_user(219955313334288385)).send(f\"Handling {command_name} from {utils.detailed_user(message)}: `{message.content}`\")\n log.info(\"Reported command usage to Kataiser\")\n except Exception as error:\n log.error(f\"Couldn't report command usage to Kataiser: {repr(error)}\")\n\n\nclient: Optional[discord.Client] = None\nlog: Optional[logging.Logger] = None\nre_command_split = re.compile(r' (?=(?:[^\"]|\"[^\"]*\")*$)')\n", "repo_name": "Kataiser/CelesteTAS-Improvements-Tracker", "sub_path": "commands.py", "file_name": "commands.py", "file_ext": "py", "file_size_in_byte": 25714, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "42", "api": [{"api_name": "discord.Message", "line_number": 25, "usage_type": "attribute"}, {"api_name": "utils.detailed_user", "line_number": 26, "usage_type": "call"}, {"api_name": 
"typing.Optional", "line_number": 39, "usage_type": "name"}, {"api_name": "re.Pattern", "line_number": 39, "usage_type": "attribute"}, {"api_name": "inspect.signature", "line_number": 42, "usage_type": "call"}, {"api_name": "discord.Message", "line_number": 53, "usage_type": "attribute"}, {"api_name": "discord.Message", "line_number": 72, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 72, "usage_type": "name"}, {"api_name": "discord.utils.oauth_url", "line_number": 82, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 82, "usage_type": "attribute"}, {"api_name": "discord.Permissions", "line_number": 82, "usage_type": "call"}, {"api_name": "discord.Message", "line_number": 98, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 98, "usage_type": "name"}, {"api_name": "db.projects.dict", "line_number": 116, "usage_type": "call"}, {"api_name": "db.projects", "line_number": 116, "usage_type": "attribute"}, {"api_name": "utils.missing_channel_permissions", "line_number": 136, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 144, "usage_type": "call"}, {"api_name": "main.generate_request_headers", "line_number": 152, "usage_type": "call"}, {"api_name": "gen_token.InstallationOwnerMissingError", "line_number": 153, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 161, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 169, "usage_type": "call"}, {"api_name": "ujson.loads", "line_number": 170, "usage_type": "call"}, {"api_name": "time.time", "line_number": 183, "usage_type": "call"}, {"api_name": "main.generate_path_cache", "line_number": 204, "usage_type": "call"}, {"api_name": "main.edit_pin", "line_number": 205, "usage_type": "call"}, {"api_name": "db.project_logs.set", "line_number": 208, "usage_type": "call"}, {"api_name": "db.project_logs", "line_number": 208, "usage_type": "attribute"}, {"api_name": "main.edit_pin", "line_number": 213, "usage_type": "call"}, {"api_name": "db.projects.set", "line_number": 215, "usage_type": "call"}, {"api_name": "db.projects", "line_number": 215, "usage_type": "attribute"}, {"api_name": "main.fast_project_ids.add", "line_number": 216, "usage_type": "call"}, {"api_name": "main.fast_project_ids", "line_number": 216, "usage_type": "attribute"}, {"api_name": "db.history_log.set", "line_number": 219, "usage_type": "call"}, {"api_name": "db.history_log", "line_number": 219, "usage_type": "attribute"}, {"api_name": "utils.log_timestamp", "line_number": 219, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 97, "usage_type": "call"}, {"api_name": "discord.Message", "line_number": 230, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 230, "usage_type": "name"}, {"api_name": "db.projects.get_by_name", "line_number": 241, "usage_type": "call"}, {"api_name": "db.projects", "line_number": 241, "usage_type": "attribute"}, {"api_name": "utils.plural", "line_number": 253, "usage_type": "call"}, {"api_name": "utils.plural", "line_number": 255, "usage_type": "call"}, {"api_name": "db.projects.set", "line_number": 257, "usage_type": "call"}, {"api_name": "db.projects", "line_number": 257, "usage_type": "attribute"}, {"api_name": "game_sync.get_mod_dependencies", "line_number": 261, "usage_type": "call"}, {"api_name": "utils.plural", "line_number": 263, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 264, "usage_type": "call"}, {"api_name": "game_sync.mods_dir", "line_number": 264, "usage_type": "call"}, 
{"api_name": "utils.plural", "line_number": 270, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 229, "usage_type": "call"}, {"api_name": "discord.Message", "line_number": 284, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 284, "usage_type": "name"}, {"api_name": "db.projects.get_by_name", "line_number": 301, "usage_type": "call"}, {"api_name": "db.projects", "line_number": 301, "usage_type": "attribute"}, {"api_name": "main.generate_request_headers", "line_number": 302, "usage_type": "call"}, {"api_name": "main.generate_path_cache", "line_number": 303, "usage_type": "call"}, {"api_name": "utils.get_user_github_account", "line_number": 317, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 320, "usage_type": "call"}, {"api_name": "main.headers", "line_number": 320, "usage_type": "attribute"}, {"api_name": "utils.handle_potential_request_error", "line_number": 321, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 322, "usage_type": "call"}, {"api_name": "ujson.loads", "line_number": 322, "usage_type": "call"}, {"api_name": "main.get_sha", "line_number": 326, "usage_type": "call"}, {"api_name": "requests.delete", "line_number": 330, "usage_type": "call"}, {"api_name": "main.headers", "line_number": 330, "usage_type": "attribute"}, {"api_name": "ujson.dumps", "line_number": 330, "usage_type": "call"}, {"api_name": "utils.handle_potential_request_error", "line_number": 331, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 332, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 337, "usage_type": "call"}, {"api_name": "main.get_sha", "line_number": 339, "usage_type": "call"}, {"api_name": "requests.put", "line_number": 346, "usage_type": "call"}, {"api_name": "main.headers", "line_number": 346, "usage_type": "attribute"}, {"api_name": "ujson.dumps", "line_number": 346, "usage_type": "call"}, {"api_name": "utils.handle_potential_request_error", "line_number": 347, "usage_type": "call"}, {"api_name": "db.path_caches.enable_cache", "line_number": 350, "usage_type": "call"}, {"api_name": "db.path_caches", "line_number": 350, "usage_type": "attribute"}, {"api_name": "db.path_caches.remove_file", "line_number": 351, "usage_type": "call"}, {"api_name": "db.path_caches", "line_number": 351, "usage_type": "attribute"}, {"api_name": "db.path_caches.add_file", "line_number": 352, "usage_type": "call"}, {"api_name": "db.path_caches", "line_number": 352, "usage_type": "attribute"}, {"api_name": "db.path_caches.disable_cache", "line_number": 353, "usage_type": "call"}, {"api_name": "db.path_caches", "line_number": 353, "usage_type": "attribute"}, {"api_name": "main.edit_pin", "line_number": 358, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 283, "usage_type": "call"}, {"api_name": "discord.Message", "line_number": 369, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 369, "usage_type": "name"}, {"api_name": "db.projects.get_by_name", "line_number": 382, "usage_type": "call"}, {"api_name": "db.projects", "line_number": 382, "usage_type": "attribute"}, {"api_name": "discord.NotFound", "line_number": 388, "usage_type": "attribute"}, {"api_name": "utils.detailed_user", "line_number": 395, "usage_type": "call"}, {"api_name": "db.projects.set", "line_number": 400, "usage_type": "call"}, {"api_name": "db.projects", "line_number": 400, "usage_type": "attribute"}, {"api_name": "utils.detailed_user", "line_number": 401, "usage_type": "call"}, {"api_name": 
"main.edit_pin", "line_number": 405, "usage_type": "call"}, {"api_name": "db.projects.set", "line_number": 409, "usage_type": "call"}, {"api_name": "db.projects", "line_number": 409, "usage_type": "attribute"}, {"api_name": "utils.detailed_user", "line_number": 410, "usage_type": "call"}, {"api_name": "main.edit_pin", "line_number": 414, "usage_type": "call"}, {"api_name": "utils.detailed_user", "line_number": 416, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 368, "usage_type": "call"}, {"api_name": "discord.Message", "line_number": 422, "usage_type": "attribute"}, {"api_name": "db.projects.get_all", "line_number": 440, "usage_type": "call"}, {"api_name": "db.projects", "line_number": 440, "usage_type": "attribute"}, {"api_name": "main.projects_count", "line_number": 446, "usage_type": "call"}, {"api_name": "time.time", "line_number": 449, "usage_type": "call"}, {"api_name": "main.login_time", "line_number": 449, "usage_type": "attribute"}, {"api_name": "db.history_log.size", "line_number": 451, "usage_type": "call"}, {"api_name": "db.history_log", "line_number": 451, "usage_type": "attribute"}, {"api_name": "utils.plural", "line_number": 452, "usage_type": "call"}, {"api_name": "discord.Message", "line_number": 459, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 459, "usage_type": "name"}, {"api_name": "db.projects.get_by_name", "line_number": 482, "usage_type": "call"}, {"api_name": "db.projects", "line_number": 482, "usage_type": "attribute"}, {"api_name": "utils.detailed_user", "line_number": 495, "usage_type": "call"}, {"api_name": "utils.plural", "line_number": 508, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 458, "usage_type": "call"}, {"api_name": "discord.Message", "line_number": 520, "usage_type": "attribute"}, {"api_name": "discord.Message", "line_number": 530, "usage_type": "attribute"}, {"api_name": "utils.detailed_user", "line_number": 533, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 539, "usage_type": "name"}, {"api_name": "discord.Client", "line_number": 539, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 540, "usage_type": "name"}, {"api_name": "logging.Logger", "line_number": 540, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 541, "usage_type": "call"}]} +{"seq_id": "7154545634", "text": "from __future__ import annotations\n\nfrom datetime import date, datetime\nfrom typing import Protocol, Type, Union, cast\nfrom uuid import uuid4\n\nimport pytest\nfrom _pytest.fixtures import FixtureRequest\nfrom sqlalchemy import String\nfrom sqlalchemy.orm import Mapped, mapped_column\n\nfrom litestar.contrib.sqlalchemy import base\nfrom litestar.repository.exceptions import ConflictError, RepositoryError\nfrom litestar.repository.filters import LimitOffset\nfrom litestar.repository.testing.generic_mock_repository import (\n GenericAsyncMockRepository,\n GenericSyncMockRepository,\n)\nfrom tests.helpers import maybe_async\nfrom tests.unit.test_repository.models_uuid import UUIDAuthor, UUIDBook\n\nAuthorRepository = GenericAsyncMockRepository[UUIDAuthor]\nAuthorRepositoryType = Type[AuthorRepository]\nModelType = Type[Union[base.UUIDBase, base.BigIntBase]]\nAuditModelType = Type[Union[base.UUIDAuditBase, base.BigIntAuditBase]]\n\n\nclass CreateAuditModelFixture(Protocol):\n def __call__(self, extra_columns: dict[str, type[Mapped] | Mapped] | None = None) -> AuditModelType:\n ...\n\n\n@pytest.fixture(name=\"authors\")\ndef fx_authors() -> 
list[UUIDAuthor]:\n \"\"\"Collection of Author models.\"\"\"\n return [\n UUIDAuthor(id=uuid4(), name=name, dob=dob, created_at=datetime.min, updated_at=datetime.min)\n for name, dob in [(\"Agatha Christie\", date(1890, 9, 15)), (\"Leo Tolstoy\", date(1828, 9, 9))]\n ]\n\n\n@pytest.fixture(params=[GenericAsyncMockRepository, GenericSyncMockRepository], ids=[\"async\", \"sync\"])\ndef repository_type(request: FixtureRequest) -> type[GenericAsyncMockRepository]:\n return cast(\"type[GenericAsyncMockRepository]\", request.param)\n\n\n@pytest.fixture(name=\"author_repository_type\", params=[GenericAsyncMockRepository, GenericSyncMockRepository])\ndef fx_author_repository_type(\n authors: list[UUIDAuthor], monkeypatch: pytest.MonkeyPatch, repository_type: type[GenericAsyncMockRepository]\n) -> AuthorRepositoryType:\n \"\"\"Mock Author repository, pre-seeded with collection data.\"\"\"\n repo = repository_type[UUIDAuthor] # type: ignore[index]\n repo.seed_collection(authors)\n return cast(\"type[GenericAsyncMockRepository]\", repo)\n\n\n@pytest.fixture(name=\"author_repository\")\ndef fx_author_repository(\n author_repository_type: type[GenericAsyncMockRepository[UUIDAuthor]],\n) -> GenericAsyncMockRepository[UUIDAuthor]:\n \"\"\"Mock Author repository instance.\"\"\"\n return author_repository_type()\n\n\n@pytest.fixture(params=[base.UUIDBase, base.BigIntBase])\ndef model_type(request: FixtureRequest) -> ModelType:\n return cast(ModelType, type(f\"{request.node.nodeid}Model\", (request.param,), {}))\n\n\n@pytest.fixture(params=[base.UUIDAuditBase, base.BigIntAuditBase])\ndef create_audit_model_type(request: FixtureRequest) -> CreateAuditModelFixture:\n def create(extra_columns: dict[str, type[Mapped] | Mapped] | None = None) -> AuditModelType:\n return cast(AuditModelType, type(f\"{request.node.nodeid}AuditModel\", (request.param,), extra_columns or {}))\n\n return create\n\n\n@pytest.fixture()\ndef audit_model_type(create_audit_model_type: CreateAuditModelFixture) -> AuditModelType:\n return create_audit_model_type()\n\n\nasync def test_repo_raises_conflict_if_add_with_id(\n authors: list[UUIDAuthor], author_repository: AuthorRepository\n) -> None:\n \"\"\"Test mock repo raises conflict if add identified entity.\"\"\"\n with pytest.raises(ConflictError):\n await maybe_async(author_repository.add(authors[0]))\n\n\nasync def test_repo_raises_conflict_if_add_many_with_id(\n authors: list[UUIDAuthor], author_repository: AuthorRepository\n) -> None:\n \"\"\"Test mock repo raises conflict if add identified entity.\"\"\"\n with pytest.raises(ConflictError):\n await maybe_async(author_repository.add_many(authors))\n\n\ndef test_generic_mock_repository_parametrization(repository_type: type[GenericAsyncMockRepository]) -> None:\n \"\"\"Test that the mock repository handles multiple types.\"\"\"\n author_repo = repository_type[UUIDAuthor] # type: ignore[index]\n book_repo = repository_type[UUIDBook] # type: ignore[index]\n assert author_repo.model_type is UUIDAuthor\n assert book_repo.model_type is UUIDBook\n\n\ndef test_generic_mock_repository_seed_collection(author_repository_type: AuthorRepositoryType) -> None:\n \"\"\"Test seeding instances.\"\"\"\n author_repository_type.seed_collection([UUIDAuthor(id=\"abc\")])\n assert \"abc\" in author_repository_type.collection\n\n\ndef test_generic_mock_repository_clear_collection(author_repository_type: AuthorRepositoryType) -> None:\n \"\"\"Test clearing collection for type.\"\"\"\n author_repository_type.clear_collection()\n assert not 
author_repository_type.collection\n\n\ndef test_generic_mock_repository_filter_collection_by_kwargs(author_repository: AuthorRepository) -> None:\n \"\"\"Test filtering the repository collection by kwargs.\"\"\"\n collection = author_repository.filter_collection_by_kwargs(author_repository.collection, name=\"Leo Tolstoy\")\n assert len(collection) == 1\n assert next(iter(collection.values())).name == \"Leo Tolstoy\"\n\n\ndef test_generic_mock_repository_filter_collection_by_kwargs_and_semantics(author_repository: AuthorRepository) -> None:\n \"\"\"Test that filtering by kwargs has `AND` semantics when multiple kwargs,\n not `OR`.\"\"\"\n collection = author_repository.filter_collection_by_kwargs(\n author_repository.collection, name=\"Agatha Christie\", dob=\"1828-09-09\"\n )\n assert len(collection) == 0\n\n\ndef test_generic_mock_repository_raises_repository_exception_if_named_attribute_doesnt_exist(\n author_repository: AuthorRepository,\n) -> None:\n \"\"\"Test that a repo exception is raised if a named attribute doesn't\n exist.\"\"\"\n with pytest.raises(RepositoryError):\n _ = author_repository.filter_collection_by_kwargs(author_repository.collection, cricket=\"ball\")\n\n\nasync def test_sets_created_updated_on_add(\n repository_type: type[GenericAsyncMockRepository], audit_model_type: AuditModelType\n) -> None:\n \"\"\"Test that the repository updates the 'created_at' and 'updated_at' timestamps\n if necessary.\"\"\"\n\n instance = audit_model_type()\n assert \"created_at\" not in vars(instance)\n assert \"updated_at\" not in vars(instance)\n\n instance = await maybe_async(repository_type[audit_model_type]().add(instance)) # type: ignore[index]\n assert \"created_at\" in vars(instance)\n assert \"updated_at\" in vars(instance)\n\n\nasync def test_sets_updated_on_update(author_repository: AuthorRepository) -> None:\n \"\"\"Test that the repository updates the 'updated' timestamp if\n necessary.\"\"\"\n\n instance = next(iter(author_repository.collection.values()))\n original_updated = instance.updated_at\n instance = await maybe_async(author_repository.update(instance))\n assert instance.updated_at > original_updated\n\n\nasync def test_does_not_set_created_updated(\n repository_type: type[GenericAsyncMockRepository], model_type: ModelType\n) -> None:\n \"\"\"Test that the repository does not update the 'updated' timestamps when\n appropriate.\"\"\"\n\n instance = model_type()\n repo = repository_type[model_type]() # type: ignore[index]\n assert \"created_at\" not in vars(instance)\n assert \"updated_at\" not in vars(instance)\n\n instance = await maybe_async(repo.add(instance))\n assert \"created_at\" not in vars(instance)\n assert \"updated_at\" not in vars(instance)\n\n instance = await maybe_async(repo.update(instance))\n assert \"created_at\" not in vars(instance)\n assert \"updated_at\" not in vars(instance)\n\n\nasync def test_add(repository_type: type[GenericAsyncMockRepository], model_type: ModelType) -> None:\n \"\"\"Test that the repository add method works correctly.\"\"\"\n\n instance = model_type()\n\n inserted_uuid_instance = await maybe_async(repository_type[model_type]().add(instance)) # type: ignore[index]\n assert inserted_uuid_instance == instance\n\n\nasync def test_add_many(repository_type: type[GenericAsyncMockRepository], model_type: ModelType) -> None:\n \"\"\"Test that the repository add_many method works correctly.\"\"\"\n\n instances = [model_type(), model_type()]\n\n inserted_uuid_instances = await 
maybe_async(repository_type[model_type]().add_many(instances)) # type: ignore[index]\n\n assert len(instances) == len(inserted_uuid_instances)\n\n\nasync def test_update(\n repository_type: type[GenericAsyncMockRepository], create_audit_model_type: CreateAuditModelFixture\n) -> None:\n \"\"\"Test that the repository update method works correctly.\"\"\"\n\n Model = create_audit_model_type({\"random_column\": Mapped[str]})\n\n mock_repo = repository_type[Model]() # type: ignore[index]\n\n instance = await maybe_async(mock_repo.add(Model(random_column=\"A\")))\n instance.random_column = \"B\"\n updated_instance = await maybe_async(mock_repo.update(instance))\n\n assert updated_instance == instance\n\n\nasync def test_update_many(\n repository_type: type[GenericAsyncMockRepository], create_audit_model_type: CreateAuditModelFixture\n) -> None:\n \"\"\"Test that the repository update_many method works correctly.\"\"\"\n\n Model = create_audit_model_type({\"random_column\": Mapped[str]})\n\n mock_repo = repository_type[Model]() # type: ignore[index]\n instances = [Model(random_column=\"A\"), Model(random_column=\"B\")]\n inserted_instances = await maybe_async(mock_repo.add_many(instances))\n for instance in inserted_instances:\n instance.random_column = \"C\"\n updated_instances = await maybe_async(mock_repo.update_many(instances))\n for instance in updated_instances:\n assert instance.random_column == \"C\"\n assert len(instances) == len(updated_instances)\n\n\nasync def test_upsert(\n repository_type: type[GenericAsyncMockRepository], create_audit_model_type: CreateAuditModelFixture\n) -> None:\n \"\"\"Test that the repository upsert method works correctly.\"\"\"\n\n Model = create_audit_model_type({\"random_column\": Mapped[str]})\n\n mock_repo = repository_type[Model]() # type: ignore[index]\n\n instance = await maybe_async(mock_repo.upsert(Model(random_column=\"A\")))\n instance.random_column = \"B\"\n updated_instance = await maybe_async(mock_repo.upsert(instance))\n\n assert updated_instance == instance\n\n\nasync def test_upsert_many(\n repository_type: type[GenericAsyncMockRepository], create_audit_model_type: CreateAuditModelFixture\n) -> None:\n \"\"\"Test that the repository upsert_many method works correctly.\"\"\"\n\n Model = create_audit_model_type({\"random_column\": Mapped[str]})\n\n mock_repo = repository_type[Model]() # type: ignore[index]\n\n instance = await maybe_async(mock_repo.upsert(Model(random_column=\"A\")))\n instance.random_column = \"B\"\n new_instance = Model(random_column=\"C\")\n updated_instances = await maybe_async(mock_repo.upsert_many([instance, new_instance]))\n\n assert new_instance in updated_instances\n assert instance in updated_instances\n\n\nasync def test_list(repository_type: type[GenericAsyncMockRepository], audit_model_type: AuditModelType) -> None:\n \"\"\"Test that the repository list returns records.\"\"\"\n\n mock_repo = repository_type[audit_model_type]() # type: ignore[index]\n inserted_instances = await maybe_async(mock_repo.add_many([audit_model_type(), audit_model_type()]))\n listed_instances = await maybe_async(mock_repo.list())\n assert inserted_instances == listed_instances\n\n\nasync def test_delete(repository_type: type[GenericAsyncMockRepository], audit_model_type: AuditModelType) -> None:\n \"\"\"Test the repository delete functionality.\"\"\"\n\n mock_repo = repository_type[audit_model_type]() # type: ignore[index]\n inserted_instances = await maybe_async(mock_repo.add_many([audit_model_type(), audit_model_type()]))\n delete_instance 
= await maybe_async(mock_repo.delete(inserted_instances[0].id))\n assert delete_instance.id == inserted_instances[0].id\n count = await maybe_async(mock_repo.count())\n assert count == 1\n\n\nasync def test_delete_many(repository_type: type[GenericAsyncMockRepository], audit_model_type: AuditModelType) -> None:\n \"\"\"Test the repository delete_many functionality.\"\"\"\n\n mock_repo = repository_type[audit_model_type]() # type: ignore[index]\n inserted_instances = await maybe_async(mock_repo.add_many([audit_model_type(), audit_model_type()]))\n delete_instances = await maybe_async(mock_repo.delete_many([obj.id for obj in inserted_instances]))\n assert len(delete_instances) == 2\n count = await maybe_async(mock_repo.count())\n assert count == 0\n\n\nasync def test_list_and_count(\n repository_type: type[GenericAsyncMockRepository], audit_model_type: AuditModelType\n) -> None:\n \"\"\"Test that the repository list_and_count returns records and the total record count.\"\"\"\n\n instances = [audit_model_type(), audit_model_type()]\n mock_repo = repository_type[audit_model_type]() # type: ignore[index]\n inserted_instances = await maybe_async(mock_repo.add_many(instances))\n listed_instances, count = await maybe_async(mock_repo.list_and_count())\n assert inserted_instances == listed_instances\n assert count == len(instances)\n\n\nasync def test_exists(\n repository_type: type[GenericAsyncMockRepository], create_audit_model_type: CreateAuditModelFixture\n) -> None:\n \"\"\"Test that the repository exists returns booleans.\"\"\"\n\n Model = create_audit_model_type({\"random_column\": Mapped[str]})\n\n instances = [Model(random_column=\"value 1\"), Model(random_column=\"value 2\")]\n mock_repo = repository_type[Model]() # type: ignore[index]\n _ = await maybe_async(mock_repo.add_many(instances))\n exists = await maybe_async(mock_repo.exists(random_column=\"value 1\"))\n assert exists\n\n\nasync def test_exists_with_filter(\n repository_type: type[GenericAsyncMockRepository], create_audit_model_type: CreateAuditModelFixture\n) -> None:\n \"\"\"Test that the repository exists returns booleans, 
with filter argument\"\"\"\n limit_filter = LimitOffset(limit=1, offset=0)\n\n Model = create_audit_model_type({\"random_column\": Mapped[str]})\n\n instances = [Model(random_column=\"value 1\"), Model(random_column=\"value 2\")]\n mock_repo = repository_type[Model]() # type: ignore[index]\n _ = await maybe_async(mock_repo.add_many(instances))\n exists = await maybe_async(mock_repo.exists(limit_filter, random_column=\"value 1\"))\n assert exists\n\n\nasync def test_count(repository_type: type[GenericAsyncMockRepository], audit_model_type: AuditModelType) -> None:\n \"\"\"Test that the repository count returns the total record count.\"\"\"\n\n instances = [audit_model_type(), audit_model_type()]\n mock_repo = repository_type[audit_model_type]() # type: ignore[index]\n _ = await maybe_async(mock_repo.add_many(instances))\n count = await maybe_async(mock_repo.count())\n assert count == len(instances)\n\n\nasync def test_get(\n repository_type: type[GenericAsyncMockRepository], create_audit_model_type: CreateAuditModelFixture\n) -> None:\n \"\"\"Test that the repository get returns a model record correctly.\"\"\"\n\n Model = create_audit_model_type({\"random_column\": Mapped[str]})\n\n instances = [Model(random_column=\"value 1\"), Model(random_column=\"value 2\")]\n mock_repo = repository_type[Model]() # type: ignore[index]\n inserted_instances = await maybe_async(mock_repo.add_many(instances))\n item_id = inserted_instances[0].id\n fetched_instance = await maybe_async(mock_repo.get(item_id))\n assert inserted_instances[0] == fetched_instance\n\n\nasync def test_get_one(\n repository_type: type[GenericAsyncMockRepository], create_audit_model_type: CreateAuditModelFixture\n) -> None:\n \"\"\"Test that the repository get_one returns a model record correctly.\"\"\"\n\n Model = create_audit_model_type({\"random_column\": Mapped[str]})\n\n instances = [Model(random_column=\"value 1\"), Model(random_column=\"value 2\")]\n mock_repo = repository_type[Model]() # type: ignore[index]\n inserted_instances = await maybe_async(mock_repo.add_many(instances))\n fetched_instance = await maybe_async(mock_repo.get_one(random_column=\"value 1\"))\n assert inserted_instances[0] == fetched_instance\n with pytest.raises(RepositoryError):\n _ = await maybe_async(mock_repo.get_one(random_column=\"value 3\"))\n\n\nasync def test_get_one_or_none(\n repository_type: type[GenericAsyncMockRepository], create_audit_model_type: CreateAuditModelFixture\n) -> None:\n \"\"\"Test that the repository get_one_or_none returns a model record correctly.\"\"\"\n\n Model = create_audit_model_type({\"random_column\": Mapped[str]})\n\n instances = [Model(random_column=\"value 1\"), Model(random_column=\"value 2\")]\n mock_repo = repository_type[Model]() # type: ignore[index]\n inserted_instances = await maybe_async(mock_repo.add_many(instances))\n fetched_instance = await maybe_async(mock_repo.get_one_or_none(random_column=\"value 1\"))\n assert inserted_instances[0] == fetched_instance\n none_instance = await maybe_async(mock_repo.get_one_or_none(random_column=\"value 3\"))\n assert none_instance is None\n\n\nasync def test_get_or_create(\n repository_type: type[GenericAsyncMockRepository], create_audit_model_type: CreateAuditModelFixture\n) -> None:\n \"\"\"Test that the repository get_or_create returns a model record correctly.\"\"\"\n\n Model = create_audit_model_type(\n {\"random_column\": Mapped[str], \"cool_attribute\": mapped_column(String, nullable=True)}\n )\n\n instances = [Model(random_column=\"value 1\", 
cool_attribute=\"yep\"), Model(random_column=\"value 2\")]\n mock_repo = repository_type[Model]() # type: ignore[index]\n inserted_instances = await maybe_async(mock_repo.add_many(instances))\n fetched_instance, fetched_created = await maybe_async(mock_repo.get_or_create(random_column=\"value 2\"))\n assert await maybe_async(mock_repo.count()) == 2\n assert inserted_instances[1] == fetched_instance\n assert fetched_created is False\n _, created = await maybe_async(mock_repo.get_or_create(random_column=\"value 3\"))\n assert await maybe_async(mock_repo.count()) == 3\n assert created\n\n\nasync def test_get_or_create_match_fields(\n repository_type: type[GenericAsyncMockRepository], create_audit_model_type: CreateAuditModelFixture\n) -> None:\n \"\"\"Test that the repository get_or_create returns a model record correctly.\"\"\"\n\n Model = create_audit_model_type(\n {\"random_column\": Mapped[str], \"cool_attribute\": mapped_column(String, nullable=True)}\n )\n\n instances = [Model(random_column=\"value 1\", cool_attribute=\"yep\"), Model(random_column=\"value 2\")]\n mock_repo = repository_type[Model]() # type: ignore[index]\n inserted_instances = await maybe_async(mock_repo.add_many(instances))\n fetched_instance, fetched_created = await maybe_async(\n mock_repo.get_or_create(match_fields=[\"random_column\"], random_column=\"value 1\", cool_attribute=\"other thing\")\n )\n assert await maybe_async(mock_repo.count()) == 2\n assert inserted_instances[0] == fetched_instance\n assert fetched_created is False\n", "repo_name": "litestar-org/litestar", "sub_path": "tests/unit/test_repository/test_generic_mock_repository.py", "file_name": "test_generic_mock_repository.py", "file_ext": "py", "file_size_in_byte": 18940, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3458, "dataset": "github-code", "pt": "42", "api": [{"api_name": "litestar.repository.testing.generic_mock_repository.GenericAsyncMockRepository", "line_number": 22, "usage_type": "name"}, {"api_name": "tests.unit.test_repository.models_uuid.UUIDAuthor", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 23, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 24, "usage_type": "name"}, {"api_name": "litestar.contrib.sqlalchemy.base.UUIDBase", "line_number": 24, "usage_type": "attribute"}, {"api_name": "litestar.contrib.sqlalchemy.base", "line_number": 24, "usage_type": "name"}, {"api_name": "litestar.contrib.sqlalchemy.base.BigIntBase", "line_number": 24, "usage_type": "attribute"}, {"api_name": "typing.Type", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 25, "usage_type": "name"}, {"api_name": "litestar.contrib.sqlalchemy.base.UUIDAuditBase", "line_number": 25, "usage_type": "attribute"}, {"api_name": "litestar.contrib.sqlalchemy.base", "line_number": 25, "usage_type": "name"}, {"api_name": "litestar.contrib.sqlalchemy.base.BigIntAuditBase", "line_number": 25, "usage_type": "attribute"}, {"api_name": "typing.Protocol", "line_number": 28, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Mapped", "line_number": 29, "usage_type": "name"}, {"api_name": "tests.unit.test_repository.models_uuid.UUIDAuthor", "line_number": 37, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.datetime.min", "line_number": 37, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 37, 
"usage_type": "name"}, {"api_name": "datetime.date", "line_number": 38, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 33, "usage_type": "call"}, {"api_name": "tests.unit.test_repository.models_uuid.UUIDAuthor", "line_number": 34, "usage_type": "name"}, {"api_name": "_pytest.fixtures.FixtureRequest", "line_number": 43, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 44, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 42, "usage_type": "call"}, {"api_name": "litestar.repository.testing.generic_mock_repository.GenericAsyncMockRepository", "line_number": 42, "usage_type": "name"}, {"api_name": "litestar.repository.testing.generic_mock_repository.GenericSyncMockRepository", "line_number": 42, "usage_type": "name"}, {"api_name": "litestar.repository.testing.generic_mock_repository.GenericAsyncMockRepository", "line_number": 43, "usage_type": "name"}, {"api_name": "tests.unit.test_repository.models_uuid.UUIDAuthor", "line_number": 49, "usage_type": "name"}, {"api_name": "pytest.MonkeyPatch", "line_number": 49, "usage_type": "attribute"}, {"api_name": "litestar.repository.testing.generic_mock_repository.GenericAsyncMockRepository", "line_number": 49, "usage_type": "name"}, {"api_name": "tests.unit.test_repository.models_uuid.UUIDAuthor", "line_number": 52, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 54, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 47, "usage_type": "call"}, {"api_name": "litestar.repository.testing.generic_mock_repository.GenericAsyncMockRepository", "line_number": 47, "usage_type": "name"}, {"api_name": "litestar.repository.testing.generic_mock_repository.GenericSyncMockRepository", "line_number": 47, "usage_type": "name"}, {"api_name": "litestar.repository.testing.generic_mock_repository.GenericAsyncMockRepository", "line_number": 59, "usage_type": "name"}, {"api_name": "tests.unit.test_repository.models_uuid.UUIDAuthor", "line_number": 59, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 57, "usage_type": "call"}, {"api_name": "litestar.repository.testing.generic_mock_repository.GenericAsyncMockRepository", "line_number": 60, "usage_type": "name"}, {"api_name": "tests.unit.test_repository.models_uuid.UUIDAuthor", "line_number": 60, "usage_type": "name"}, {"api_name": "_pytest.fixtures.FixtureRequest", "line_number": 66, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 67, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 65, "usage_type": "call"}, {"api_name": "litestar.contrib.sqlalchemy.base.UUIDBase", "line_number": 65, "usage_type": "attribute"}, {"api_name": "litestar.contrib.sqlalchemy.base", "line_number": 65, "usage_type": "name"}, {"api_name": "litestar.contrib.sqlalchemy.base.BigIntBase", "line_number": 65, "usage_type": "attribute"}, {"api_name": "_pytest.fixtures.FixtureRequest", "line_number": 71, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Mapped", "line_number": 72, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 73, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 70, "usage_type": "call"}, {"api_name": "litestar.contrib.sqlalchemy.base.UUIDAuditBase", "line_number": 70, "usage_type": "attribute"}, {"api_name": "litestar.contrib.sqlalchemy.base", "line_number": 70, "usage_type": "name"}, {"api_name": "litestar.contrib.sqlalchemy.base.BigIntAuditBase", "line_number": 70, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 78, 
"usage_type": "call"}, {"api_name": "tests.unit.test_repository.models_uuid.UUIDAuthor", "line_number": 84, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 87, "usage_type": "call"}, {"api_name": "litestar.repository.exceptions.ConflictError", "line_number": 87, "usage_type": "argument"}, {"api_name": "tests.helpers.maybe_async", "line_number": 88, "usage_type": "call"}, {"api_name": "tests.unit.test_repository.models_uuid.UUIDAuthor", "line_number": 92, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 95, "usage_type": "call"}, {"api_name": "litestar.repository.exceptions.ConflictError", "line_number": 95, "usage_type": "argument"}, {"api_name": "tests.helpers.maybe_async", "line_number": 96, "usage_type": "call"}, {"api_name": "litestar.repository.testing.generic_mock_repository.GenericAsyncMockRepository", "line_number": 99, "usage_type": "name"}, {"api_name": "tests.unit.test_repository.models_uuid.UUIDAuthor", "line_number": 101, "usage_type": "name"}, {"api_name": "tests.unit.test_repository.models_uuid.UUIDBook", "line_number": 102, "usage_type": "name"}, {"api_name": "tests.unit.test_repository.models_uuid.UUIDAuthor", "line_number": 103, "usage_type": "name"}, {"api_name": "tests.unit.test_repository.models_uuid.UUIDBook", "line_number": 104, "usage_type": "name"}, {"api_name": "tests.unit.test_repository.models_uuid.UUIDAuthor", "line_number": 109, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 140, "usage_type": "call"}, {"api_name": "litestar.repository.exceptions.RepositoryError", "line_number": 140, "usage_type": "argument"}, {"api_name": "litestar.repository.testing.generic_mock_repository.GenericAsyncMockRepository", "line_number": 145, "usage_type": "name"}, {"api_name": "tests.helpers.maybe_async", "line_number": 154, "usage_type": "call"}, {"api_name": "tests.helpers.maybe_async", "line_number": 165, "usage_type": "call"}, {"api_name": "litestar.repository.testing.generic_mock_repository.GenericAsyncMockRepository", "line_number": 170, "usage_type": "name"}, {"api_name": "tests.helpers.maybe_async", "line_number": 180, "usage_type": "call"}, {"api_name": "tests.helpers.maybe_async", "line_number": 184, "usage_type": "call"}, {"api_name": "litestar.repository.testing.generic_mock_repository.GenericAsyncMockRepository", "line_number": 189, "usage_type": "name"}, {"api_name": "tests.helpers.maybe_async", "line_number": 194, "usage_type": "call"}, {"api_name": "litestar.repository.testing.generic_mock_repository.GenericAsyncMockRepository", "line_number": 198, "usage_type": "name"}, {"api_name": "tests.helpers.maybe_async", "line_number": 203, "usage_type": "call"}, {"api_name": "litestar.repository.testing.generic_mock_repository.GenericAsyncMockRepository", "line_number": 209, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Mapped", "line_number": 213, "usage_type": "name"}, {"api_name": "tests.helpers.maybe_async", "line_number": 217, "usage_type": "call"}, {"api_name": "tests.helpers.maybe_async", "line_number": 219, "usage_type": "call"}, {"api_name": "litestar.repository.testing.generic_mock_repository.GenericAsyncMockRepository", "line_number": 225, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Mapped", "line_number": 229, "usage_type": "name"}, {"api_name": "tests.helpers.maybe_async", "line_number": 233, "usage_type": "call"}, {"api_name": "tests.helpers.maybe_async", "line_number": 236, "usage_type": "call"}, {"api_name": 
"litestar.repository.testing.generic_mock_repository.GenericAsyncMockRepository", "line_number": 243, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Mapped", "line_number": 247, "usage_type": "name"}, {"api_name": "tests.helpers.maybe_async", "line_number": 251, "usage_type": "call"}, {"api_name": "tests.helpers.maybe_async", "line_number": 253, "usage_type": "call"}, {"api_name": "litestar.repository.testing.generic_mock_repository.GenericAsyncMockRepository", "line_number": 259, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Mapped", "line_number": 263, "usage_type": "name"}, {"api_name": "tests.helpers.maybe_async", "line_number": 267, "usage_type": "call"}, {"api_name": "tests.helpers.maybe_async", "line_number": 270, "usage_type": "call"}, {"api_name": "litestar.repository.testing.generic_mock_repository.GenericAsyncMockRepository", "line_number": 276, "usage_type": "name"}, {"api_name": "tests.helpers.maybe_async", "line_number": 280, "usage_type": "call"}, {"api_name": "tests.helpers.maybe_async", "line_number": 281, "usage_type": "call"}, {"api_name": "litestar.repository.testing.generic_mock_repository.GenericAsyncMockRepository", "line_number": 285, "usage_type": "name"}, {"api_name": "tests.helpers.maybe_async", "line_number": 289, "usage_type": "call"}, {"api_name": "tests.helpers.maybe_async", "line_number": 290, "usage_type": "call"}, {"api_name": "tests.helpers.maybe_async", "line_number": 292, "usage_type": "call"}, {"api_name": "litestar.repository.testing.generic_mock_repository.GenericAsyncMockRepository", "line_number": 296, "usage_type": "name"}, {"api_name": "tests.helpers.maybe_async", "line_number": 300, "usage_type": "call"}, {"api_name": "tests.helpers.maybe_async", "line_number": 301, "usage_type": "call"}, {"api_name": "tests.helpers.maybe_async", "line_number": 303, "usage_type": "call"}, {"api_name": "litestar.repository.testing.generic_mock_repository.GenericAsyncMockRepository", "line_number": 308, "usage_type": "name"}, {"api_name": "tests.helpers.maybe_async", "line_number": 314, "usage_type": "call"}, {"api_name": "tests.helpers.maybe_async", "line_number": 315, "usage_type": "call"}, {"api_name": "litestar.repository.testing.generic_mock_repository.GenericAsyncMockRepository", "line_number": 321, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Mapped", "line_number": 325, "usage_type": "name"}, {"api_name": "tests.helpers.maybe_async", "line_number": 329, "usage_type": "call"}, {"api_name": "tests.helpers.maybe_async", "line_number": 330, "usage_type": "call"}, {"api_name": "litestar.repository.testing.generic_mock_repository.GenericAsyncMockRepository", "line_number": 335, "usage_type": "name"}, {"api_name": "litestar.repository.filters.LimitOffset", "line_number": 338, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Mapped", "line_number": 340, "usage_type": "name"}, {"api_name": "tests.helpers.maybe_async", "line_number": 344, "usage_type": "call"}, {"api_name": "tests.helpers.maybe_async", "line_number": 345, "usage_type": "call"}, {"api_name": "litestar.repository.testing.generic_mock_repository.GenericAsyncMockRepository", "line_number": 349, "usage_type": "name"}, {"api_name": "tests.helpers.maybe_async", "line_number": 354, "usage_type": "call"}, {"api_name": "tests.helpers.maybe_async", "line_number": 355, "usage_type": "call"}, {"api_name": "litestar.repository.testing.generic_mock_repository.GenericAsyncMockRepository", "line_number": 360, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Mapped", "line_number": 364, 
"usage_type": "name"}, {"api_name": "tests.helpers.maybe_async", "line_number": 368, "usage_type": "call"}, {"api_name": "tests.helpers.maybe_async", "line_number": 370, "usage_type": "call"}, {"api_name": "litestar.repository.testing.generic_mock_repository.GenericAsyncMockRepository", "line_number": 375, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Mapped", "line_number": 379, "usage_type": "name"}, {"api_name": "tests.helpers.maybe_async", "line_number": 383, "usage_type": "call"}, {"api_name": "tests.helpers.maybe_async", "line_number": 384, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 386, "usage_type": "call"}, {"api_name": "litestar.repository.exceptions.RepositoryError", "line_number": 386, "usage_type": "argument"}, {"api_name": "tests.helpers.maybe_async", "line_number": 387, "usage_type": "call"}, {"api_name": "litestar.repository.testing.generic_mock_repository.GenericAsyncMockRepository", "line_number": 391, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Mapped", "line_number": 395, "usage_type": "name"}, {"api_name": "tests.helpers.maybe_async", "line_number": 399, "usage_type": "call"}, {"api_name": "tests.helpers.maybe_async", "line_number": 400, "usage_type": "call"}, {"api_name": "tests.helpers.maybe_async", "line_number": 402, "usage_type": "call"}, {"api_name": "litestar.repository.testing.generic_mock_repository.GenericAsyncMockRepository", "line_number": 407, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Mapped", "line_number": 412, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.mapped_column", "line_number": 412, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 412, "usage_type": "argument"}, {"api_name": "tests.helpers.maybe_async", "line_number": 417, "usage_type": "call"}, {"api_name": "tests.helpers.maybe_async", "line_number": 418, "usage_type": "call"}, {"api_name": "tests.helpers.maybe_async", "line_number": 419, "usage_type": "call"}, {"api_name": "tests.helpers.maybe_async", "line_number": 422, "usage_type": "call"}, {"api_name": "tests.helpers.maybe_async", "line_number": 423, "usage_type": "call"}, {"api_name": "litestar.repository.testing.generic_mock_repository.GenericAsyncMockRepository", "line_number": 428, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Mapped", "line_number": 433, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.mapped_column", "line_number": 433, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 433, "usage_type": "argument"}, {"api_name": "tests.helpers.maybe_async", "line_number": 438, "usage_type": "call"}, {"api_name": "tests.helpers.maybe_async", "line_number": 439, "usage_type": "call"}, {"api_name": "tests.helpers.maybe_async", "line_number": 442, "usage_type": "call"}]} +{"seq_id": "28487496135", "text": "#!/usr/bin/python3\n'''\nSend a request to a URL and display the body of the response\n'''\n\nimport sys\nimport requests\n\n\nif __name__ == '__main__':\n\n if len(sys.argv) != 2:\n print('Usage: ', __file__, 'URL', file=sys.stderr)\n sys.exit(1)\n\n resp = requests.get(sys.argv[1])\n\n if resp.status_code >= 400:\n print('Error code:', resp.status_code)\n else:\n print(resp.text)\n", "repo_name": "patrickdeyoreo/holbertonschool-higher_level_programming", "sub_path": "0x11-python-network_1/7-error_code.py", "file_name": "7-error_code.py", "file_ext": "py", "file_size_in_byte": 405, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "33", "api": [{"api_name": "sys.argv", 
"line_number": 12, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 13, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 14, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 16, "usage_type": "attribute"}]} +{"seq_id": "39537787482", "text": "\"\"\"sm URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, re_path, include\nfrom api import views\n\napp_name = 'api'\n\nurlpatterns = [\n # 管理员登陆\n path('adminLogin/', views.LoginView.as_view()),\n # 用户登录\n path('userLogin/', views.UserLoginView.as_view()),\n # 用户头像\n path('userAvatar/', views.UserAvatar.as_view()),\n # 获取左边栏菜单\n path('menus/', views.Menus.as_view()),\n # 获取每日推荐轮换图\n path('carouselPics/', views.CarouselPics.as_view()),\n # 修改用户可用状态\n path('users//state/', views.ChangeActive.as_view()),\n # 商品分类增删改查\n path('categories/', views.Categories.as_view()),\n # 根据ID获取用户信息\n path('users/', views.get_info_by_id),\n # 用户数据增删改查\n path('users/', views.Users.as_view()),\n # 检查用户名是否可用\n path('checkUsable/', views.check_useable),\n # 检查商品类别名是否可用\n path('checkCateNameUsable/', views.check_cate_name_useable),\n # 商品增删改查\n path('goods/', views.Goods.as_view()),\n # 查询商品\n path('good/', views.Good.as_view()),\n # 图片上传接口\n path('itemPics/', views.ItemPics.as_view()),\n # 推荐商品列表\n path('recommendList/', views.recommendList.as_view()),\n # 获取全部商品页面信息\n path('getAllGood/', views.getAllGoodBreif),\n # 获取订单列表\n path('orders/', views.Orders.as_view()),\n # 获取订单列表(用户级)\n path('userOrder/', views.UserOrders.as_view()),\n # 搜索商品\n path('searchItem/', views.SearchItem.as_view()),\n # 搜索菜谱\n path('searchCookbook/', views.SearchCookbook.as_view()),\n # 支付接口\n path('payment/', views.Pay.as_view()),\n # 获取物流信息\n path('kuaidi/', views.Kuaidi.as_view()),\n # 用户收货地址相关\n path('delivery/', views.Delivery.as_view()),\n # 用户收货地址(用户级)\n path('userDelivery/', views.UserDelivery.as_view()),\n # 获取商家列表\n path('merchant/', views.Merchant.as_view()),\n # 菜谱相关\n path('cookbooks/', views.CookBook.as_view()),\n # 获取我的菜谱\n path('myBook/', views.MyBook.as_view()),\n # 获取未被选择的店铺管理员\n path('merchantAdmin/', views.get_merchant_admin),\n # 购物车\n path('cart/', views.Cart.as_view()),\n # 返回一个uuid\n path('uuid/', views.get_uuid),\n # 管理员欢迎页信息\n path('sold/', views.getHomePageData),\n # 随便吃什么接口\n path('randomCookbook/', views.RandomCookbook.as_view()),\n # 测试接口\n path('test/', views.test),\n # 新增随机用户\n path('randomuser//', views.newRandomUser),\n # 新增随机订单\n path('randomorder//', views.newRandomOrder),\n # 新增随机订单(指定用户)\n path('randomorder///', views.newRandomOrderOfSb),\n]\n", "repo_name": "Mustard030/vue-django-shop", "sub_path": "backend/api/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 3631, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "33", "api": [{"api_name": 
"django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "api.views.LoginView.as_view", "line_number": 24, "usage_type": "call"}, {"api_name": "api.views.LoginView", "line_number": 24, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 24, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "api.views.UserLoginView.as_view", "line_number": 26, "usage_type": "call"}, {"api_name": "api.views.UserLoginView", "line_number": 26, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 26, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "api.views.UserAvatar.as_view", "line_number": 28, "usage_type": "call"}, {"api_name": "api.views.UserAvatar", "line_number": 28, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 28, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": "api.views.Menus.as_view", "line_number": 30, "usage_type": "call"}, {"api_name": "api.views.Menus", "line_number": 30, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 30, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 32, "usage_type": "call"}, {"api_name": "api.views.CarouselPics.as_view", "line_number": 32, "usage_type": "call"}, {"api_name": "api.views.CarouselPics", "line_number": 32, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 32, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 34, "usage_type": "call"}, {"api_name": "api.views.ChangeActive.as_view", "line_number": 34, "usage_type": "call"}, {"api_name": "api.views.ChangeActive", "line_number": 34, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 34, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 36, "usage_type": "call"}, {"api_name": "api.views.Categories.as_view", "line_number": 36, "usage_type": "call"}, {"api_name": "api.views.Categories", "line_number": 36, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 36, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 38, "usage_type": "call"}, {"api_name": "api.views.get_info_by_id", "line_number": 38, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 38, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 40, "usage_type": "call"}, {"api_name": "api.views.Users.as_view", "line_number": 40, "usage_type": "call"}, {"api_name": "api.views.Users", "line_number": 40, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 40, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 42, "usage_type": "call"}, {"api_name": "api.views.check_useable", "line_number": 42, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 42, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 44, "usage_type": "call"}, {"api_name": "api.views.check_cate_name_useable", "line_number": 44, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 44, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 46, "usage_type": "call"}, {"api_name": "api.views.Goods.as_view", "line_number": 46, "usage_type": "call"}, {"api_name": "api.views.Goods", "line_number": 46, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 46, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 48, 
"usage_type": "call"}, {"api_name": "api.views.Good.as_view", "line_number": 48, "usage_type": "call"}, {"api_name": "api.views.Good", "line_number": 48, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 48, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 50, "usage_type": "call"}, {"api_name": "api.views.ItemPics.as_view", "line_number": 50, "usage_type": "call"}, {"api_name": "api.views.ItemPics", "line_number": 50, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 50, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 52, "usage_type": "call"}, {"api_name": "api.views.recommendList.as_view", "line_number": 52, "usage_type": "call"}, {"api_name": "api.views.recommendList", "line_number": 52, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 52, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 54, "usage_type": "call"}, {"api_name": "api.views.getAllGoodBreif", "line_number": 54, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 54, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 56, "usage_type": "call"}, {"api_name": "api.views.Orders.as_view", "line_number": 56, "usage_type": "call"}, {"api_name": "api.views.Orders", "line_number": 56, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 56, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 58, "usage_type": "call"}, {"api_name": "api.views.UserOrders.as_view", "line_number": 58, "usage_type": "call"}, {"api_name": "api.views.UserOrders", "line_number": 58, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 58, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 60, "usage_type": "call"}, {"api_name": "api.views.SearchItem.as_view", "line_number": 60, "usage_type": "call"}, {"api_name": "api.views.SearchItem", "line_number": 60, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 60, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 62, "usage_type": "call"}, {"api_name": "api.views.SearchCookbook.as_view", "line_number": 62, "usage_type": "call"}, {"api_name": "api.views.SearchCookbook", "line_number": 62, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 62, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 64, "usage_type": "call"}, {"api_name": "api.views.Pay.as_view", "line_number": 64, "usage_type": "call"}, {"api_name": "api.views.Pay", "line_number": 64, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 64, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 66, "usage_type": "call"}, {"api_name": "api.views.Kuaidi.as_view", "line_number": 66, "usage_type": "call"}, {"api_name": "api.views.Kuaidi", "line_number": 66, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 66, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 68, "usage_type": "call"}, {"api_name": "api.views.Delivery.as_view", "line_number": 68, "usage_type": "call"}, {"api_name": "api.views.Delivery", "line_number": 68, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 68, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 70, "usage_type": "call"}, {"api_name": "api.views.UserDelivery.as_view", "line_number": 70, "usage_type": "call"}, {"api_name": "api.views.UserDelivery", "line_number": 70, "usage_type": "attribute"}, {"api_name": 
"api.views", "line_number": 70, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 72, "usage_type": "call"}, {"api_name": "api.views.Merchant.as_view", "line_number": 72, "usage_type": "call"}, {"api_name": "api.views.Merchant", "line_number": 72, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 72, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 74, "usage_type": "call"}, {"api_name": "api.views.CookBook.as_view", "line_number": 74, "usage_type": "call"}, {"api_name": "api.views.CookBook", "line_number": 74, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 74, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 76, "usage_type": "call"}, {"api_name": "api.views.MyBook.as_view", "line_number": 76, "usage_type": "call"}, {"api_name": "api.views.MyBook", "line_number": 76, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 76, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 78, "usage_type": "call"}, {"api_name": "api.views.get_merchant_admin", "line_number": 78, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 78, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 80, "usage_type": "call"}, {"api_name": "api.views.Cart.as_view", "line_number": 80, "usage_type": "call"}, {"api_name": "api.views.Cart", "line_number": 80, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 80, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 82, "usage_type": "call"}, {"api_name": "api.views.get_uuid", "line_number": 82, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 82, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 84, "usage_type": "call"}, {"api_name": "api.views.getHomePageData", "line_number": 84, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 84, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 86, "usage_type": "call"}, {"api_name": "api.views.RandomCookbook.as_view", "line_number": 86, "usage_type": "call"}, {"api_name": "api.views.RandomCookbook", "line_number": 86, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 86, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 88, "usage_type": "call"}, {"api_name": "api.views.test", "line_number": 88, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 88, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 90, "usage_type": "call"}, {"api_name": "api.views.newRandomUser", "line_number": 90, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 90, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 92, "usage_type": "call"}, {"api_name": "api.views.newRandomOrder", "line_number": 92, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 92, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 94, "usage_type": "call"}, {"api_name": "api.views.newRandomOrderOfSb", "line_number": 94, "usage_type": "attribute"}, {"api_name": "api.views", "line_number": 94, "usage_type": "name"}]} +{"seq_id": "20678934536", "text": "import discord\nfrom discord.ext import commands\nimport os\nfrom dotenv import load_dotenv\n\nbot = commands.Bot(command_prefix = \"c-\")\nbot.load_extension(\"jishaku\")\n\n@bot.event\nasync def on_ready():\n\tprint(\"Ready as\", bot.user)\n\nfor filename in os.listdir('./cogs'):\n if 
filename.endswith('.py'):\n bot.load_extension(f'cogs.{filename[:-3]}')\n\nload_dotenv(dotenv_path = \".env\")\ntoken = os.environ.get('token')\nbot.run(token)\n", "repo_name": "ssebastianoo/cloner", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 425, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "discord.ext.commands.Bot", "line_number": 6, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 6, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 13, "usage_type": "call"}, {"api_name": "dotenv.load_dotenv", "line_number": 17, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 18, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 18, "usage_type": "attribute"}]} +{"seq_id": "2915858388", "text": "\"\"\"\n@Author: Gyanendra\n@Date: 14/01/2021 \n@Last Modified by: Gyanendra\n@Last Modified time: 14/01/2021 \n@Title : Strings in Python\n\"\"\"\nimport logging\nimport sys\n\ndef function_length_string(str):\n    \"\"\"\n    Description:\n        Counts the characters in a string\n    Parameter:\n        A sample string\n    Return:\n        The length of the string\n    \"\"\"\n    length = len(str) \n    return length\n\nif __name__ == \"__main__\":\n\n    logger = logging.getLogger()\n    logger.setLevel(logging.INFO)\n    formatter = logging.Formatter('%(asctime)s | %(levelname)s | %(message)s', \n                                  '%m-%d-%Y %H:%M:%S')\n\n    stdout_handler = logging.StreamHandler(sys.stdout)\n    stdout_handler.setLevel(logging.DEBUG)\n    stdout_handler.setFormatter(formatter)\n\n    file_handler = logging.FileHandler(r'E:\Python Workspace\Core-Programs\data_structure\Strings\string_file.log')\n    file_handler.setLevel(logging.DEBUG)\n    file_handler.setFormatter(formatter)\n\n    logger.addHandler(file_handler)\n    logger.addHandler(stdout_handler)\n\n    sample_str = \"Gyanendra Pratap Singh\"\n    result = function_length_string(sample_str)\n    logger.info(\"The length of the string '{}' is {}\".format(sample_str, result))\n \n ", "repo_name": "Gyanendra-gif/Core-Programs", "sub_path": "data_structure/Strings/1_length.py", "file_name": "1_length.py", "file_ext": "py", "file_size_in_byte": 1231, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "33", "api": [{"api_name": "logging.getLogger", "line_number": 25, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 26, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 27, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 30, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 31, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 34, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 35, "usage_type": "attribute"}]} +{"seq_id": "4738699318", "text": "import streamlit as st\r\nimport pandas as pd\r\nimport joblib\r\n# import shap\r\nimport matplotlib.pyplot as plt\r\nimport sklearn\r\n\r\npd.set_option('display.max_rows', None)\r\npd.set_option('display.max_columns', None)\r\n\r\n# Page content settings\r\n# Page name\r\nst.set_page_config(page_title=\"30d readmission\", layout=\"wide\")\r\n# Title\r\nst.title('An online web-app for predicting 30-day readmission')\r\n\r\nst.markdown('_This is a web app to predict the risk of 30-day unplanned all-cause readmission\\\r\n based on several features that 
you can see in the sidebar. Please adjust the\\\r\n value of each feature. After that, click on the Predict button at the bottom to\\\r\n see the prediction._', )\r\nst.markdown('## *Input Data:*')\r\n# Hide the bottom watermark\r\nhide_st_style = \"\"\"\r\n