diff --git "a/082.jsonl" "b/082.jsonl" new file mode 100644--- /dev/null +++ "b/082.jsonl" @@ -0,0 +1,492 @@ +{"seq_id": "55234566", "text": "#!/usr/bin/python3\n# This script consumes a Star War API and search characters\nfrom sys import argv\nimport requests\n\nif __name__ == \"__main__\":\n params = {\"search\": argv[1]}\n url = \"http://swapi.co/api/people/?search=\"\n res = requests.get(url, params=params)\n try:\n d = res.json()\n print(\"Number of results:\", d.get(\"count\"))\n for result in d.get(\"results\"):\n print(result.get(\"name\"))\n except ValueError:\n print(\"Not a valid JSON\")\n", "sub_path": "0x11-python-network_1/101-starwars.py", "file_name": "101-starwars.py", "file_ext": "py", "file_size_in_byte": 487, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "sys.argv", "line_number": 7, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "240450665", "text": "import requests\r\nfrom bs4 import BeautifulSoup \r\n\r\n\r\nheaders = {'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36'} \r\n\r\n\r\n\r\ndef main():\r\n baseUrl = 'http://ip.yqie.com/proxyhttps/'\r\n html = requests.get(baseUrl,headers=headers)\r\n soup = BeautifulSoup(html.text,'lxml')\r\n\r\n tr_list = soup.findAll('tr')[1:]\r\n\r\n # print(tr_list)\r\n for item in tr_list:\r\n https = item.findAll('td')[1]\r\n ip = item.findAll('td')[2]\r\n print(str(https) + ':' + str(ip))\r\n\r\n\r\nif __name__ == '__main__':\r\n main()", "sub_path": "10.【高效&反反爬】ip池/XXG_getIP.py", "file_name": "XXG_getIP.py", "file_ext": "py", "file_size_in_byte": 617, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "requests.get", "line_number": 11, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "481735526", "text": "#-*- coding: UTF-8 -*-\nimport tornado.web\nimport tornado.httpserver\nfrom lib.util import Handler\nfrom lib.mysendmail import send_mail\nfrom connect.tornado.web import Route\n\nimport json\n\nclass MysqlHandler(Handler):\n importName = __name__\n\n@Route(\"/mysql/\")\nclass mysql(MysqlHandler):\n @tornado.web.authenticated\n def get(self):\n return self.render(\"mysql.html\")\n\n @tornado.web.authenticated\n def post(self):\n dbname = self.get_argument(\"dbname\",0)\n #if not dbname or not str(name).strip():\n # raise tornado.web.HTTPError(500, \"请填写dbname\")\n #comment = self.request.files['myfile']\n comment = self.get_argument(\"myfile\")\n username = self.current_user\n res = comment.lower()\n self.db.mysqlinsert(username,dbname,res)\n alert_context = \"提交成功! 
请等待审核...\\n您提交的内容如下:\\n%s\" % res\n self.write(alert_context)\n\n@Route(\"/userinfo/(.*?)/\")\nclass userinfo(MysqlHandler):\n @tornado.web.authenticated\n def get(self,method):\n if method == \"show\":\n if self.current_user == \"admin\":\n result = self.db.mysqlonlineadminshow()\n else:\n result = self.db.mysqlonlineshow(self.current_user)\n if result:\n return self.render(\"user_info.html\",result=result)\n\n @tornado.web.authenticated\n def post(self,method):\n if method == \"updatemysql\":\n logid = self.get_argument(\"logid\")\n username = self.get_argument(\"username\")\n if self.current_user == \"admin\":\n if self.db.updateMysql(logid):\n self.write(\"上线成功\")\n temptext = \"你有新的数据库上线通知,请登录系统进行查看.\"\n send_mail(['%s@u17.com' % username,],'数据库上线通知','用户%s你好!

%s
' % (username,temptext))\n else:\n self.write(\"上线失败\")\n\n ", "sub_path": "src/mysql/handler.py", "file_name": "handler.py", "file_ext": "py", "file_size_in_byte": 2019, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "lib.util.Handler", "line_number": 10, "usage_type": "name"}, {"api_name": "tornado.web.web", "line_number": 15, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 15, "usage_type": "name"}, {"api_name": "tornado.web.web", "line_number": 19, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 19, "usage_type": "name"}, {"api_name": "connect.tornado.web.Route", "line_number": 13, "usage_type": "call"}, {"api_name": "tornado.web.web", "line_number": 34, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 34, "usage_type": "name"}, {"api_name": "lib.mysendmail.send_mail", "line_number": 53, "usage_type": "call"}, {"api_name": "tornado.web.web", "line_number": 44, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 44, "usage_type": "name"}, {"api_name": "connect.tornado.web.Route", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "282455207", "text": "# -*- coding: utf-8 -*-\n# Cardboardlint is a cheap lint solution for pull requests.\n# Copyright (C) 2011-2017 The Cardboardlint Development Team\n#\n# This file is part of Cardboardlint.\n#\n# Cardboardlint is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 3\n# of the License, or (at your option) any later version.\n#\n# Cardboardlint is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see \n# --\n\"\"\"Linter using CPPLint.\n\nThis test calls the cpplint.py program, see https://github.com/google/styleguide\n\"\"\"\nfrom __future__ import print_function\n\nfrom cardboardlint.common import run_command, Message, Linter\n\n\n__all__ = ['linter_cpplint']\n\n\nDEFAULT_CONFIG = {\n # Filename filter rules\n 'filefilter': ['+ *.h', '+ *.h.in', '+ *.cpp', '+ *.c'],\n # Location of the file\n 'script': 'cpplint'\n}\n\n\ndef _has_failed(_returncode, stdout, _stderr):\n \"\"\"Determine if cpplint.py has failed.\"\"\"\n return 'FATAL' in stdout\n\n\ndef run_cpplint(config, filenames):\n \"\"\"Linter for cpplint.\n\n Parameters\n ----------\n config : dict\n Dictionary that contains the configuration for the linter\n Not supported\n filenames : list\n A list of filenames to check\n\n Returns\n -------\n messages : list\n The list of messages generated by the external linter.\n\n \"\"\"\n messages = []\n if len(filenames) > 0:\n # Call cpplint\n command = ([config['script'], '--linelength=100', '--filter=-runtime/int'] +\n filenames)\n output = run_command(command, has_failed=_has_failed)[1]\n\n # Parse the output of cpplint into standard return values\n for line in output.split('\\n')[:-1]:\n words = line.split()\n if len(words) == 0 or words[0].count(':') != 2:\n continue\n filename, lineno = words[0].split(':')[:2]\n description = ' '.join(words[1:-2])\n tag = words[-2]\n priority = words[-1]\n lineno = int(lineno)\n if lineno == 0:\n lineno = None\n messages.append(Message(filename, lineno, None, '%s %s %s' % (\n priority, tag, description)))\n return messages\n\n\n# pylint: disable=invalid-name\nlinter_cpplint = Linter('cpplint', run_cpplint, DEFAULT_CONFIG, language='cpp')\n", "sub_path": "cardboardlint/linter_cpplint.py", "file_name": "linter_cpplint.py", "file_ext": "py", "file_size_in_byte": 2720, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "cardboardlint.common.run_command", "line_number": 67, "usage_type": "call"}, {"api_name": "cardboardlint.common.Message", "line_number": 81, "usage_type": "call"}, {"api_name": "cardboardlint.common.Linter", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "12650297", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 19 17:03:15 2019\n\n@author: fubao\n\"\"\"\n\n\n\n# overcome the imbalanced dataset \n# not use oversampling or undersampling\n\n# use bagging classifier \n# it's said that the skleran bagging classifier can not solve the imbalanced problem\n\n# https://towardsdatascience.com/having-an-imbalanced-dataset-here-is-how-you-can-solve-it-1640568947eb\n\n\n\nimport sys\nimport os\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.tree import DecisionTreeClassifier\nfrom imblearn.ensemble import BalancedBaggingClassifier\n\nfrom common_classifier import load_data_all_features\nfrom sklearn.metrics import confusion_matrix\n\n\ncurrent_file_cur = os.path.dirname(os.path.abspath(__file__))\nsys.path.insert(0, current_file_cur + '/..')\n\nfrom profiling.common_prof import dataDir2\n\n\n\n\n\n\ndef emsembleClassifierTrainTest(X, y):\n\n # Splitting the dataset into the Training set and Test set\n \n X_Train, X_Test, Y_Train, 
Y_Test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n print (\"X_train X_test shape:\", X_Train.shape, X_Test.shape)\n \n # Feature Scaling\n sc_X = StandardScaler()\n X_Train = sc_X.fit_transform(X_Train)\n X_Test = sc_X.transform(X_Test)\n \n # Fitting the classifier into the Training set\n classifier = BalancedBaggingClassifier(base_estimator=DecisionTreeClassifier(),n_estimators = 200,\n sampling_strategy='auto',\n replacement=False,\n random_state=0)\n \n classifier.fit(X_Train,Y_Train)\n \n # Predicting the test set results\n \n Y_Pred = classifier.predict(X_Test)\n \n # Making the Confusion Matrix \n accuracy = classifier.score(X_Test, Y_Test) \n cm = confusion_matrix(Y_Test, Y_Pred)\n \n training_accuracy = classifier.score(X_Train, Y_Train) \n\n print (\"rftTrainTest testing acc, cm: \", accuracy, cm)\n \n print (\"rftTrainTest training acc, cm: \", training_accuracy)\n\n \ndef executeTest_feature_most_expensive_config():\n '''\n execute classification, where features are calculated from the pose esimation result derived from the most expensive config\n '''\n video_dir_lst = ['output_006-cardio_condition-20mins/', 'output_008-Marathon-20mins/'\n ] \n \n for video_dir in video_dir_lst[1:2]: #[1:2]: #[0:1]:\n \n data_examples_dir = dataDir2 + video_dir + 'data_examples_files/'\n\n xfile = 'X_data_features_config-history-frms1-sampleNum8025.pkl' # 'X_data_features_config-history-frms1-sampleNum8025.pkl'\n yfile = 'Y_data_features_config-history-frms1-sampleNum8025.pkl' #'Y_data_features_config-history-frms1-sampleNum8025.pkl'\n \n #xfile = 'X_data_features_config-weighted_interval-history-frms1-5-10-sampleNum8025.pkl' # 'X_data_features_config-history-frms1-sampleNum8025.pkl'\n #yfile = 'Y_data_features_config-weighted_interval-history-frms1-5-10-sampleNum8025.pkl' #'Y_data_features_config-history-frms1-sampleNum8025.pkl'\n X,y= load_data_all_features(data_examples_dir, xfile, yfile)\n \n emsembleClassifierTrainTest(X,y)\n \n \ndef executeTest_feature_selected_config():\n '''\n '''\n data_examples_dir = dataDir2 + 'output_006-cardio_condition-20mins/' + 'data_examples_files_feature_selected_config/'\n\n #xfile = 'X_data_features_config-history-frms25-sampleNum8025.pkl' # 'X_data_features_config-history-frms1-sampleNum8025.pkl'\n #yfile = 'Y_data_features_config-history-frms25-sampleNum8025.pkl' #'Y_data_features_config-history-frms1-sampleNum8025.pkl'\n \n xfile = 'X_data_features_config-history-frms1-sampleNum35765.pkl' # 'X_data_features_config-history-frms1-sampleNum8025.pkl'\n yfile = 'Y_data_features_config-history-frms1-sampleNum35765.pkl' #'Y_data_features_config-history-frms1-sampleNum8025.pkl'\n X,y= load_data_all_features(data_examples_dir, xfile, yfile)\n emsembleClassifierTrainTest(X,y)\n\n\nif __name__== \"__main__\": \n \n executeTest_feature_most_expensive_config()\n \n #executeTest_feature_selected_config()\n\n", "sub_path": "backup_codes/classifierForSwitchConfig/classifier_ensembleClassifier.py", "file_name": "classifier_ensembleClassifier.py", "file_ext": "py", "file_size_in_byte": 4268, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "os.path.dirname", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 36, "usage_type": "call"}, {"api_name": "sys.path.insert", "line_number": 37, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 37, "usage_type": 
"attribute"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 50, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 54, "usage_type": "call"}, {"api_name": "imblearn.ensemble.BalancedBaggingClassifier", "line_number": 59, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 59, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 72, "usage_type": "call"}, {"api_name": "profiling.common_prof.dataDir2", "line_number": 90, "usage_type": "name"}, {"api_name": "common_classifier.load_data_all_features", "line_number": 97, "usage_type": "call"}, {"api_name": "profiling.common_prof.dataDir2", "line_number": 105, "usage_type": "name"}, {"api_name": "common_classifier.load_data_all_features", "line_number": 112, "usage_type": "call"}]} +{"seq_id": "348203081", "text": "# -*- coding: utf-8 -*-\n\nimport urllib.request, re, datetime, os, time\nimport threading\nfrom PyQt5 import QtCore\n# html = urllib.request.urlopen(\"http://www.baidu.com\")\n#\n# print(html.getcode())\n\n\nclass Down :\n\n # 检查网址合法性\n def checkUrl(urlStr):\n if not (urlStr.startswith(\"http://\")) and not (urlStr.startswith(\"https://\")):\n return \"网址不正确(以http或者https开头)\"\n return \"\"\n\n def __init__(self):\n super().__init__()\n # self.status = 0\n self.showMessageFunc = 0\n # self.text = 0\n def getHtmlStr(self, urlStr, showMessageFunc):\n\n self.urlStr = urlStr\n self.showMessageFunc = showMessageFunc\n # 模拟请求\n headers = {\n 'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1',\n }\n req = urllib.request.Request(urlStr, headers=headers)\n\n # self.status.showMessage(\"解析网址中....\")\n\n try:\n html = urllib.request.urlopen(req)\n except:\n # return \"网址访问出错\"\n showMessageFunc(\"网址访问出错\")\n\n return\n else:\n # self.status.showMessage(\"解析图片地址中....\")\n return self.getImgs(html.read())\n\n def getImgs(self, htmlStr):\n print(htmlStr)\n # 普通网页图片解析\n reg = r'((https|http):[^\\s\";]*?\\.(jpg|png|jpeg))'\n imgArray = re.compile(reg).findall(htmlStr.decode('gbk', 'ignore'))\n print(len(imgArray))\n\n # 1688处理\n if self.urlStr.find(\"1688\")>0 and htmlStr.decode('gbk', 'ignore').find(\"detailUrl\")>0:\n reg1 = r'\"detailUrl\":\"(.*?)\"'\n aliUrlArray = re.compile(reg1).findall(htmlStr.decode('gbk', 'ignore'))\n if len(aliUrlArray)>0:\n print(aliUrlArray[0])\n # self.status.showMessage(\"读取1688图片详情网址...\")\n imgArray1 = self.getHtmlStr(aliUrlArray[0], self.showMessageFunc)\n if type(imgArray) == type(imgArray1) and len(imgArray1)>0:\n for item in imgArray1:\n imgArray.append(item)\n print(len(imgArray))\n return imgArray\n\n\n def saveImgs(self, imgArray, showMessageFunc):\n date = datetime.datetime.now()\n timeName = date.strftime(\"%H_%M_%S_imgs\")\n documentPath = os.path.join(os.path.expanduser(\"~\"), 'Desktop') + \"/\" + timeName\n os.makedirs(documentPath)\n # self.status.showMessage(\"创建桌面文件夹\"+timeName)\n\n threads = []\n for img in imgArray:\n try:\n # 处理img[0]\n urlStr = img[0]\n # print(urlStr)\n urlStr1 = urlStr.replace(r\"\\/\", \"/\").replace(\"small\", \"large\")\n print(urlStr1)\n conn = urllib.request.urlopen(urlStr1)\n except:\n print(\"错误\")\n else:\n print(\"成功\")\n # self.text.append(urlStr1)\n imgPath = documentPath + \"/\" + str(time.time()) + \".\" + img[2]\n f = open(imgPath, 'wb')\n f.write(conn.read())\n f.close()\n\n showMessageFunc(\"文件存储到桌面/{}下\".format(timeName))", 
"sub_path": "DownImgs/DownImg.py", "file_name": "DownImg.py", "file_ext": "py", "file_size_in_byte": 3358, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "urllib.request.request.Request", "line_number": 32, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 32, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 32, "usage_type": "name"}, {"api_name": "urllib.request.request.urlopen", "line_number": 37, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 37, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 37, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 51, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 57, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 70, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "os.path.expanduser", "line_number": 72, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 73, "usage_type": "call"}, {"api_name": "urllib.request.request.urlopen", "line_number": 84, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 84, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 84, "usage_type": "name"}, {"api_name": "time.time", "line_number": 90, "usage_type": "call"}]} +{"seq_id": "138784821", "text": "from google_helpers.sheets_attendance import (\n get_sheet_as_df,\n MORNING,\n COL_EMAIL,\n COL_TIMESTAMP,\n)\nfrom attendance.models import MorningAttendance\nfrom django.contrib.auth import get_user_model\nfrom django.core.management.base import BaseCommand, CommandError\nfrom datetime import datetime, timedelta\n\nUser = get_user_model()\n\n\ndef save_row(row):\n # print(f\"morning - {row[COL_EMAIL]}\")\n user, _ = User.objects.get_or_create(email=row[COL_EMAIL])\n MorningAttendance.objects.get_or_create(\n user=user,\n # timestamp=row[COL_TIMESTAMP],\n date=row[COL_TIMESTAMP].date(),\n defaults={\n \"plan_of_action\": row[\"plan_of_action\"],\n \"problems_forseen\": row[\"problems_forseen\"],\n \"requests\": row[\"requests\"],\n \"late_reason\": row[\"late_reason\"],\n \"score\": row[\"score\"],\n \"timestamp\": row[COL_TIMESTAMP],\n },\n )\n\n\ndef pull_morning_attendance(days):\n df = get_sheet_as_df(MORNING)\n df = df[df[\"Timestamp\"].dt.date == datetime.now().date() - timedelta(days=days)]\n df.apply(save_row, axis=1)\n print(df.head())\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument(\"days\", type=int, nargs=\"?\", default=0)\n\n def handle(self, *args, **options):\n pull_morning_attendance(options[\"days\"])\n", "sub_path": "backend/attendance/management/commands/pull_morning_attendance.py", "file_name": "pull_morning_attendance.py", "file_ext": "py", "file_size_in_byte": 1362, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "django.contrib.auth.get_user_model", "line_number": 12, "usage_type": "call"}, {"api_name": "google_helpers.sheets_attendance.COL_EMAIL", "line_number": 17, "usage_type": "name"}, {"api_name": "attendance.models.MorningAttendance.objects.get_or_create", "line_number": 18, "usage_type": "call"}, {"api_name": 
"attendance.models.MorningAttendance.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "attendance.models.MorningAttendance", "line_number": 18, "usage_type": "name"}, {"api_name": "google_helpers.sheets_attendance.COL_TIMESTAMP", "line_number": 21, "usage_type": "name"}, {"api_name": "google_helpers.sheets_attendance.COL_TIMESTAMP", "line_number": 28, "usage_type": "name"}, {"api_name": "google_helpers.sheets_attendance.get_sheet_as_df", "line_number": 34, "usage_type": "call"}, {"api_name": "google_helpers.sheets_attendance.MORNING", "line_number": 34, "usage_type": "argument"}, {"api_name": "datetime.datetime.now", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 35, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 35, "usage_type": "call"}, {"api_name": "django.core.management.base.BaseCommand", "line_number": 40, "usage_type": "name"}]} +{"seq_id": "45601718", "text": "#for creating a histogram that depicts the 50 most common words in the supplied text file\r\nfrom collections import Counter\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nwords = open('tweets.txt', 'r').read()\r\nwords = words.split()\r\nword_count = map(lambda w:(w, words.count(w)),set(words))\r\nbow = dict(word_count)\r\n\r\n\r\nbow_cnt = Counter(bow)\r\nsorted_list = dict(bow_cnt.most_common(50))\r\n\r\n#print(sorted_list)\r\n\r\nlabels = list(sorted_list.keys())\r\nvalues = list(sorted_list.values())\r\n#print(labels)\r\n#print(values)\r\n\r\nindexes = np.arange(len(labels))\r\nwidth = 1\r\nplt.bar(indexes, values, 1)\r\nplt.xticks(indexes, labels, rotation = 'vertical')\r\nplt.show()\r\n\r\n\r\n", "sub_path": "most_freq_plt.py", "file_name": "most_freq_plt.py", "file_ext": "py", "file_size_in_byte": 671, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "collections.Counter", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "235521949", "text": "# coding: utf8\nfrom __future__ import unicode_literals, print_function, division\nfrom unittest import TestCase\n\nfrom mock import patch, Mock\n\nfrom clldutils.testing import capture, capture_all\n\n\nclass Tests(TestCase):\n def test_ArgumentParser(self):\n from clldutils.clilib import ArgumentParser, ParserError, command\n\n def cmd(args):\n \"\"\"\n docstring\n \"\"\"\n if len(args.args) < 1:\n raise ParserError('not enough arguments')\n print(args.args[0])\n\n parser = ArgumentParser('pkg', cmd)\n\n with capture(parser.main, args=['help', 'cmd']) as out:\n self.assertIn('docstring', out)\n\n with capture(parser.main, args=['cmd', 'arg']) as out:\n self.assertIn('arg', out)\n\n self.assertEqual(parser.main(args=['cmd', 'arg']), 0)\n\n with capture(parser.main, args=['cmd']) as out:\n self.assertIn('not enough arguments', out)\n\n with capture_all(parser.main, args=['x']) as res:\n self.assertNotEqual(res[0], 0)\n 
self.assertTrue(res[1].startswith('invalid'))\n\n @command()\n def ls(args):\n \"\"\"\n my name is ls\n \"\"\"\n return\n\n @command(name='list')\n def f(args):\n \"\"\"\n my name is list\n \"\"\"\n return\n\n parser = ArgumentParser('pkg')\n with capture(parser.main, args=['help', 'ls']) as out:\n self.assertIn('my name is ls', out)\n\n with capture(parser.main, args=['help', 'list']) as out:\n self.assertIn('my name is list', out)\n\n self.assertEqual(parser.main(args=['ls', 'arg']), 0)\n self.assertEqual(parser.main(args=['list', 'arg']), 0)\n\n def test_cmd_error(self):\n from clldutils.clilib import ArgumentParser\n\n def cmd(args):\n raise ValueError\n\n parser = ArgumentParser('pkg', cmd)\n with self.assertRaises(ValueError):\n parser.main(args=['cmd'])\n\n self.assertEqual(parser.main(args=['cmd'], catch_all=True), 1)\n\n def test_confirm(self):\n from clldutils.clilib import confirm\n\n with patch('clldutils.clilib.input', Mock(return_value='')):\n self.assertTrue(confirm('a?'))\n self.assertFalse(confirm('a?', default=False))\n\n with patch('clldutils.clilib.input', Mock(side_effect=['x', 'y'])):\n with capture_all(confirm, 'a?') as res:\n self.assertTrue(res[0])\n self.assertIn('Please respond', res[1])\n", "sub_path": "clldutils/tests/test_clilib.py", "file_name": "test_clilib.py", "file_ext": "py", "file_size_in_byte": 2569, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "unittest.TestCase", "line_number": 10, "usage_type": "name"}, {"api_name": "clldutils.clilib.ParserError", "line_number": 19, "usage_type": "call"}, {"api_name": "clldutils.clilib.ArgumentParser", "line_number": 22, "usage_type": "call"}, {"api_name": "clldutils.testing.capture", "line_number": 24, "usage_type": "call"}, {"api_name": "clldutils.testing.capture", "line_number": 27, "usage_type": "call"}, {"api_name": "clldutils.testing.capture", "line_number": 32, "usage_type": "call"}, {"api_name": "clldutils.testing.capture_all", "line_number": 35, "usage_type": "call"}, {"api_name": "clldutils.clilib.command", "line_number": 39, "usage_type": "call"}, {"api_name": "clldutils.clilib.command", "line_number": 46, "usage_type": "call"}, {"api_name": "clldutils.clilib.ArgumentParser", "line_number": 53, "usage_type": "call"}, {"api_name": "clldutils.testing.capture", "line_number": 54, "usage_type": "call"}, {"api_name": "clldutils.testing.capture", "line_number": 57, "usage_type": "call"}, {"api_name": "clldutils.clilib.ArgumentParser", "line_number": 69, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 78, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 78, "usage_type": "call"}, {"api_name": "clldutils.clilib.confirm", "line_number": 79, "usage_type": "call"}, {"api_name": "clldutils.clilib.confirm", "line_number": 80, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 82, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 82, "usage_type": "call"}, {"api_name": "clldutils.testing.capture_all", "line_number": 83, "usage_type": "call"}, {"api_name": "clldutils.clilib.confirm", "line_number": 83, "usage_type": "name"}]} +{"seq_id": "5981626", "text": "\n\n\"\"\"PyAudio example: Record a few seconds of audio and save to a WAVE file.\"\"\"\n\nimport matplotlib.pyplot as plt\nimport pyaudio\nimport wave\nimport calcbytes as cb\nimport numpy as np\nimport struct\nCHUNK = 1024\nFORMAT = pyaudio.paInt16\nCHANNELS = 1\nRATE = 44100\nRECORD_SECONDS = 1\nWAVE_OUTPUT_FILENAME = 
\"output.wav\"\n\np = pyaudio.PyAudio()\n\nstream = p.open(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n\t\toutput=True,\n frames_per_buffer=CHUNK)\n\nprint(\"* recording\")\n\nframes = []\nframesint=np.array([])\n\nfor i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\n data = stream.read(CHUNK)\n framesint=np.concatenate((framesint,np.array(struct.unpack(\"%dh\" % (CHUNK), data)))) \n \n frames.append(data)\n\t\nprint(\"* done recording\")\n\n\nstream.stop_stream()\nstream.close()\np.terminate()\n\n\nplt.plot(framesint)\nplt.show()\n\nwf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')\nwf.setnchannels(CHANNELS)\nwf.setsampwidth(p.get_sample_size(FORMAT))\nwf.setframerate(RATE)\nwf.writeframes(b''.join(frames))\nwf.close()\n\n#if __name__ == '__main__':\n#\tmain()\n\n", "sub_path": "writeWave.py", "file_name": "writeWave.py", "file_ext": "py", "file_size_in_byte": 1110, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "pyaudio.paInt16", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pyaudio.PyAudio", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 34, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "wave.open", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "434862214", "text": "from django.urls import path\nfrom . 
import views\n\napp_name = 'music'\n\nurlpatterns = [\n path('result/', views.result, name='result'),\n\n #music/\n path('', views.IndexView.as_view(), name='Home'),\n\n #music/1\n path('/', views.DetailView.as_view(), name=\"Details\"),\n\n #music/album/add\n path('album/add', views.AlbumCreate.as_view(), name=\"album-add\"),\n\n #music/edit-album/1\n path('edit-album/', views.UpdateAlbumView.as_view(), name='update-album'),\n\n #music/album/1/delete\n path('album//delete', views.DeleteAlbumView.as_view(), name='delete-album'),\n\n #music/1/song/add\n path('/song/add', views.SongCreate.as_view(), name=\"song-add\"),\n\n #music/1/song/update/1/\n path('/song/update/', views.UpdateSong.as_view(), name=\"update-song\"),\n\n #music/1/song/delete/1\n path('/song/delete/', views.DeleteSong.as_view(), name=\"delete-song\"),\n\n path('/favourite/song/', views.favourite_song, name=\"favourite-song\")\n]\n\n", "sub_path": "website/music/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1060, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "411003176", "text": "from flask import request, jsonify\nimport sqlite3\n\n\ndef add_car():\n username = request.json['username']\n car_number = request.json['carNumber']\n db = sqlite3.connect('db.db')\n cur = db.cursor()\n try:\n cur.execute('INSERT INTO Cars (username,carNumber,isInside) VALUES (?,?,?)', (username, car_number, 0))\n db.commit()\n except Exception as e:\n print(e)\n return jsonify({'message': f'Car number {car_number} is alreday exist',\n 'status': 'failed'})\n finally:\n db.close()\n return jsonify({'message': 'car added',\n 'status': 'successful',\n 'carNumber': car_number,\n 'username': username})\n", "sub_path": "api/add_car.py", "file_name": "add_car.py", "file_ext": "py", "file_size_in_byte": 724, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "flask.request.json", "line_number": 6, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 6, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 7, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 7, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "628778374", "text": "# pie game\r\n# first simple game\r\n# created by zhangliangliang 2017/6/28\r\nimport sys\r\nimport pygame\r\nimport math\r\nfrom pygame.locals import *\r\npygame.init()\r\nSCREEN = pygame.display.set_mode((600,500))\r\npygame.display.set_caption(\"Pie 
Game\")\r\nMY_FONT = pygame.font.Font(None, 60) #set font\r\n\r\nCOLOR = 200, 80, 60 #set color\r\nWIDTH = 40\r\nX = 300\r\nY = 250\r\nRADIUS = 250 \r\nPOSTION = X - RADIUS, Y - RADIUS, RADIUS * 2, RADIUS *2\r\n\r\npiece1 = False\r\npiece2 = False\r\npiece3 = False\r\npiece4 = False\r\n\r\nwhile True:\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n sys.exit()\r\n elif event.type == KEYUP: \r\n if event.key == pygame.K_1:\r\n piece1 = True\r\n elif event.key == pygame.K_2:\r\n piece2 = True\r\n elif event.key == pygame.K_3:\r\n piece3 = True\r\n elif event.key == pygame.K_4:\r\n piece4 = True\r\n \r\n #clear the screen\r\n SCREEN.fill((0, 200, 200))\r\n\r\n #draw the four numbers\r\n textImg1 = MY_FONT.render(\"1\", True, COLOR)\r\n SCREEN.blit(textImg1, (X + RADIUS / 2 - 20, Y - RADIUS / 2))\r\n textImg2 = MY_FONT.render(\"2\", True, COLOR)\r\n SCREEN.blit(textImg2, (X - RADIUS / 2, Y - RADIUS / 2))\r\n textImg3 = MY_FONT.render(\"3\", True, COLOR)\r\n SCREEN.blit(textImg3, (X - RADIUS / 2, Y + RADIUS / 2 - 20))\r\n textImg4 = MY_FONT.render(\"4\", True, COLOR)\r\n SCREEN.blit(textImg4, (X + RADIUS / 2 - 20, Y + RADIUS / 2 - 20))\r\n\r\n #which one should be drawn\r\n if piece1 :\r\n start_angle = math.radians(0)\r\n end_angle = math.radians(90)\r\n pygame.draw.arc(SCREEN, COLOR, POSTION, start_angle, end_angle, WIDTH)\r\n pygame.draw.line(SCREEN, COLOR, (X, Y), (X, Y - RADIUS), WIDTH)\r\n pygame.draw.line(SCREEN, COLOR, (X, Y), (X + RADIUS, Y), WIDTH)\r\n if piece2 :\r\n start_angle = math.radians(90)\r\n end_angle = math.radians(180)\r\n pygame.draw.arc(SCREEN, COLOR, POSTION, start_angle, end_angle, WIDTH)\r\n pygame.draw.line(SCREEN, COLOR, (X, Y), (X, Y - RADIUS), WIDTH)\r\n pygame.draw.line(SCREEN, COLOR, (X, Y), (X - RADIUS, Y), WIDTH)\r\n if piece3 :\r\n start_angle = math.radians(180)\r\n end_angle = math.radians(270)\r\n pygame.draw.arc(SCREEN, COLOR, POSTION, start_angle, end_angle, WIDTH)\r\n pygame.draw.line(SCREEN, COLOR, (X, Y), (X - RADIUS, Y), WIDTH)\r\n pygame.draw.line(SCREEN, COLOR, (X, Y), (X, Y + RADIUS), WIDTH)\r\n if piece4 :\r\n start_angle = math.radians(270)\r\n end_angle = math.radians(360)\r\n pygame.draw.arc(SCREEN, COLOR, POSTION, start_angle, end_angle, WIDTH)\r\n pygame.draw.line(SCREEN, COLOR, (X, Y), (X, Y + RADIUS), WIDTH)\r\n pygame.draw.line(SCREEN, COLOR, (X, Y), (X + RADIUS, Y), WIDTH)\r\n\r\n #is the pie finished\r\n if piece1 and piece2 and piece3 and piece4 :\r\n COLOR = 0, 250, 0\r\n textImg5 = MY_FONT.render(\"You win!\", True, COLOR)\r\n SCREEN.blit(textImg5, (0,0))\r\n pygame.display.update()", "sub_path": "pygame_2.py", "file_name": "pygame_2.py", "file_ext": "py", "file_size_in_byte": 3050, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "pygame.init", "line_number": 8, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 26, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 26, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 28, 
"usage_type": "call"}, {"api_name": "pygame.K_1", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pygame.K_2", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygame.K_3", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pygame.K_4", "line_number": 36, "usage_type": "attribute"}, {"api_name": "math.radians", "line_number": 54, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 55, "usage_type": "call"}, {"api_name": "pygame.draw.arc", "line_number": 56, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 57, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 58, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 58, "usage_type": "attribute"}, {"api_name": "math.radians", "line_number": 60, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 61, "usage_type": "call"}, {"api_name": "pygame.draw.arc", "line_number": 62, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 62, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 63, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 64, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 64, "usage_type": "attribute"}, {"api_name": "math.radians", "line_number": 66, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 67, "usage_type": "call"}, {"api_name": "pygame.draw.arc", "line_number": 68, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 68, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 69, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 69, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 70, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 70, "usage_type": "attribute"}, {"api_name": "math.radians", "line_number": 72, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 73, "usage_type": "call"}, {"api_name": "pygame.draw.arc", "line_number": 74, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 74, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 75, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 75, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 76, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 76, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 83, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 83, "usage_type": "attribute"}]} +{"seq_id": "306034637", "text": "import numpy as np\nimport keras\nimport keras.backend as K\nfrom keras import optimizers\nfrom keras.models import Sequential\nfrom keras.layers import Dropout, Flatten, Dense,LeakyReLU,BatchNormalization\n\n\nx_train=np.load('/content/drive/My Drive/GreenDeck/15k_data_cnn_features.npy')\ny_train=np.load('/content/drive/My Drive/GreenDeck/15k_data_cnn_feature_asins.npy')\n\n\n\n\nclass Custom_lr(keras.callbacks.Callback):\n\n def on_train_begin(self, logs={}):\n\t K.set_value(self.model.optimizer.lr, 0.001)\n\n def on_epoch_begin(self, epoch, logs={}):\n lr_present=K.get_value(self.model.optimizer.lr)\n #print(epoch)\n if (epoch%10==0) and epoch:\n\n 
K.set_value(self.model.optimizer.lr, lr_present/((epoch)**0.5))\n print(K.get_value(self.model.optimizer.lr))\n print(lr_present/((epoch)**0.5))\n\ntop_model=Sequential()\ntop_model.add(Flatten(input_shape=xtrain.shape[1:]))\ntop_model.add(Dense(256, activation='relu'))\ntop_model.add(BatchNormalization())\ntop_model.add(Dropout(0.5))\ntop_model.add(Dense(32, activation='relu'))\ntop_model.add(BatchNormalization())\ntop_model.add(Dropout(0.5))\n# top_model.add(BatchNormalization())\ntop_model.add(Dense(17, activation='softmax'))\n\ntop_model.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\ncallbacks = [\n\t Custom_lr()\n\t]\ncheckpoint = ModelCheckpoint(\"./bottleneck_vgg16_model.h5\",\n monitor=\"val_loss\",\n mode=\"min\",\n save_best_only = True,\n verbose=1)\nearlystop = EarlyStopping(monitor = 'val_loss',\n mode=\"min\",\n min_delta = 0,\n patience = 5,\n verbose = 1,\n restore_best_weights = True)\n\nreduce_lr = ReduceLROnPlateau(monitor = 'val_loss', factor = 0.2, patience = 2,verbose = 1, min_delta = 0.0001)\n\n\n\ntop_model.fit(xtrain, ytrain,\n epochs=30,\n batch_size=32,\n validation_data=(xtest, ytest), callbacks=callbacks)\n\nscore = top_model.evaluate(xtest,ytest)\nprint(score)\n", "sub_path": "bottleneck_feature/train_bottleneck.py", "file_name": "train_bottleneck.py", "file_ext": "py", "file_size_in_byte": 2183, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "numpy.load", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 10, "usage_type": "call"}, {"api_name": "keras.callbacks", "line_number": 15, "usage_type": "attribute"}, {"api_name": "keras.backend.set_value", "line_number": 18, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 18, "usage_type": "name"}, {"api_name": "keras.backend.get_value", "line_number": 21, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 21, "usage_type": "name"}, {"api_name": "keras.backend.set_value", "line_number": 25, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 25, "usage_type": "name"}, {"api_name": "keras.backend.get_value", "line_number": 26, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 26, "usage_type": "name"}, {"api_name": "keras.models.Sequential", "line_number": 29, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 30, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 31, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 34, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 35, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 36, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "179288975", "text": "from flask import Flask\n\nfrom werkzeug.contrib.profiler import ProfilerMiddleware\n\nfrom .core import db, babel\n\n\ndef load_config(app, additional_config={}):\n \"\"\"Load configuration from environment variable plus from additional\n dictionary for test cases.\"\"\"\n app.config.from_envvar(\"FLASK_CONFIG\")\n app.config.update(additional_config)\n return app\n\n\ndef add_profiler(app):\n \"\"\"Add a profiler that runs on every request when PROFILE 
set to True.\"\"\"\n if app.config.get(\"PROFILE\", False):\n app.wsgi_app = ProfilerMiddleware(app.wsgi_app,\n restrictions=[30],\n sort_by=(\"time\", \"cumulative\"))\n return app\n\n\ndef create_db(app, db):\n \"\"\"Create database from models.\"\"\"\n with app.app_context():\n db.create_all()\n\n\ndef create_app(additional_config={}, name=\"atlas_core\", standalone=False):\n \"\"\"App factory. Creates a Flask `app` object and imports extensions, sets\n config variables etc.\"\"\"\n\n app = Flask(name)\n app = load_config(app, additional_config)\n\n if not standalone:\n # Register blueprints\n from .sample.views import sample_app\n app.register_blueprint(sample_app)\n\n # Load extensions\n db.init_app(app)\n babel.init_app(app)\n\n # Debug tools\n if app.debug:\n app = add_profiler(app)\n\n if standalone:\n create_db(app, db)\n\n return app\n", "sub_path": "atlas_core/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 1425, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "werkzeug.contrib.profiler.ProfilerMiddleware", "line_number": 19, "usage_type": "call"}, {"api_name": "core.db.create_all", "line_number": 28, "usage_type": "call"}, {"api_name": "core.db", "line_number": 28, "usage_type": "name"}, {"api_name": "flask.Flask", "line_number": 35, "usage_type": "call"}, {"api_name": "sample.views.sample_app", "line_number": 41, "usage_type": "argument"}, {"api_name": "core.db.init_app", "line_number": 44, "usage_type": "call"}, {"api_name": "core.db", "line_number": 44, "usage_type": "name"}, {"api_name": "core.babel.init_app", "line_number": 45, "usage_type": "call"}, {"api_name": "core.babel", "line_number": 45, "usage_type": "name"}, {"api_name": "core.db", "line_number": 52, "usage_type": "argument"}]} +{"seq_id": "97872293", "text": "from . 
import plugin\n\nfrom flask_login import current_user, login_required\nfrom flask import render_template, request, Response\n\nfrom wtforms import StringField, FloatField\nfrom flask_wtf import FlaskForm\nfrom flask_wtf.file import FileField, FileRequired\nfrom wtforms.widgets import TextArea\n\nfrom wtforms.validators import DataRequired, URL\n\nfrom security.admin import is_admin\nfrom security.request import send_get_request, send_post_request, send_put_request\n\nimport os, json\n\nclass PluginForm(FlaskForm):\n video_url = StringField('Video URL', validators=[DataRequired()])\n image_urls = StringField('Image URLs', validators=[DataRequired()])\n\n name = StringField('Name', validators=[DataRequired()])\n\n price = StringField('Price', validators=[DataRequired()])\n\n description = StringField('Description', validators=[DataRequired()], widget=TextArea())\n\n plugin_file = FileField()\n\n@plugin.route('/plugin/add', methods=['GET', 'POST'])\n@login_required\n@is_admin\ndef plugin_add():\n\n form = PluginForm()\n\n if form.validate_on_submit():\n\n form_json = {\n\n 'video_url': form.video_url.data,\n 'image_urls': form.image_urls.data,\n 'name': form.name.data,\n 'price': form.price.data,\n 'description': form.description.data,\n 'path': 'files/{}'.format(form.plugin_file.data.filename),\n }\n\n resp = send_post_request({}, json.dumps(form_json), 'plugin/add')\n\n if resp.status_code == 201:\n form.plugin_file.data.save('files/{}'.format(form.plugin_file.data.filename))\n return 'Plugin added'\n else:\n return 'Failed to add plugin'\n\n return render_template('plugin/form.html', form=form, method='add')\n\n@plugin.route('/plugin/edit/', methods=['POST', 'GET'])\n@login_required\n@is_admin\ndef plugin_edit(plugin_id):\n resp = send_get_request({'plugin_id': plugin_id}, 'plugin/get')\n\n if resp.status_code != 200:\n return Response(status=resp.status_code)\n\n plugin_data = resp.json()\n\n form = PluginForm()\n\n if form.validate_on_submit():\n form_json = {\n 'id': plugin_id,\n 'video_url': form.video_url.data,\n 'image_urls': form.image_urls.data,\n 'name': form.name.data,\n 'price': form.price.data,\n 'description': form.description.data\n }\n\n if form.plugin_file.data != None:\n\n if os.path.isfile(plugin_data['path']):\n os.remove(plugin_data['path'])\n\n form_json['path'] = 'files/{}'.format(form.plugin_file.data.filename)\n form.plugin_file.data.save('files/{}'.format(form.plugin_file.data.filename))\n\n resp = send_put_request({}, json.dumps(form_json), 'plugin/edit')\n\n if resp.status_code == 200:\n return 'Plugin edited'\n\n return 'Plugin edit failed'\n\n form.description.data = plugin_data['description']\n form.video_url.data = plugin_data['video_url']\n form.image_urls.data = plugin_data['image_urls']\n form.price.data = plugin_data['price']\n form.name.data = plugin_data['name']\n\n return render_template('plugin/form.html', form=form, method=\"edit/{}\".format(plugin_id))\n", "sub_path": "plugin/manager.py", "file_name": "manager.py", "file_ext": "py", "file_size_in_byte": 3228, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "flask_wtf.FlaskForm", "line_number": 18, "usage_type": "name"}, {"api_name": "wtforms.StringField", "line_number": 19, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 19, "usage_type": "call"}, {"api_name": "wtforms.StringField", "line_number": 20, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 
20, "usage_type": "call"}, {"api_name": "wtforms.StringField", "line_number": 22, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 22, "usage_type": "call"}, {"api_name": "wtforms.StringField", "line_number": 24, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 24, "usage_type": "call"}, {"api_name": "wtforms.StringField", "line_number": 26, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 26, "usage_type": "call"}, {"api_name": "wtforms.widgets.TextArea", "line_number": 26, "usage_type": "call"}, {"api_name": "flask_wtf.file.FileField", "line_number": 28, "usage_type": "call"}, {"api_name": "security.request.send_post_request", "line_number": 49, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 57, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 31, "usage_type": "name"}, {"api_name": "security.admin.is_admin", "line_number": 32, "usage_type": "name"}, {"api_name": "security.request.send_get_request", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 85, "usage_type": "call"}, {"api_name": "security.request.send_put_request", "line_number": 90, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 90, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 103, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 60, "usage_type": "name"}, {"api_name": "security.admin.is_admin", "line_number": 61, "usage_type": "name"}]} +{"seq_id": "333912010", "text": "'''\r\nCreated on Mar 30, 2016\r\n@author: anup\r\n'''\r\n\r\nimport re\r\nimport nltk\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.tokenize import sent_tokenize\r\nfrom collections import Counter\r\n\r\nclass Document(object):\r\n def __init__(self, topic, title, doc):\r\n \"\"\"Initial the Document object with tokens and topics\r\n \"\"\"\r\n if topic:\r\n self.terms = Counter()\r\n stopwrd = stopwords.words('english')\r\n self.topic = topic\r\n self.title = self.tokenize(title,stopwrd)\r\n self.tokens = self.tokenize(doc,stopwrd)\r\n \r\n def document_terms(self):\r\n return self.terms\r\n \r\n def tokenize(self,doc,stopwrd):\r\n \"\"\"Tokenize using whitespace and words only\"\"\"\r\n \r\n result = []\r\n document = doc\r\n if type(doc) is list:\r\n document = '.'.join(doc)\r\n \r\n for sent in sent_tokenize(document):\r\n sent = sent.strip('.')\r\n for token in nltk.word_tokenize(sent):\r\n token = token.lower().strip('.').strip('\\'')\r\n if token not in stopwrd and len(token) > 2 and re.match('^[0-9]*[a-zA-Z]+[0-9]*$', token):\r\n self.terms[token] += 1\r\n result.append(token)\r\n return result\r\n", "sub_path": "Cluster/Document.py", "file_name": "Document.py", "file_ext": "py", "file_size_in_byte": 1300, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "collections.Counter", "line_number": 17, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 18, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 18, "usage_type": "name"}, {"api_name": 
"nltk.tokenize.sent_tokenize", "line_number": 34, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 36, "usage_type": "call"}, {"api_name": "re.match", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "127649068", "text": "from collections import (\n Mapping,\n)\nimport json\nimport os\n\nfrom cytoolz import (\n compose,\n)\nfrom eth_keyfile import (\n create_keyfile_json,\n decode_keyfile_json,\n)\nfrom eth_keys import (\n KeyAPI,\n keys,\n)\nfrom eth_keys.exceptions import (\n ValidationError,\n)\nfrom eth_utils import (\n is_dict,\n keccak,\n)\n\nfrom web3.utils.datastructures import (\n AttributeDict,\n HexBytes,\n)\nfrom web3.utils.decorators import (\n combomethod,\n)\nfrom web3.utils.encoding import (\n hexstr_if_str,\n text_if_str,\n to_bytes,\n to_int,\n)\nfrom web3.utils.signing import (\n LocalAccount,\n hash_of_signed_transaction,\n sign_message_hash,\n sign_transaction_dict,\n signature_wrapper,\n to_standard_signature_bytes,\n to_standard_v,\n)\nfrom web3.utils.transactions import (\n Transaction,\n vrs_from,\n)\n\n\nclass Account(object):\n _keys = keys\n\n @combomethod\n def create(self, extra_entropy=''):\n extra_key_bytes = text_if_str(to_bytes, extra_entropy)\n key_bytes = keccak(os.urandom(32) + extra_key_bytes)\n return self.privateKeyToAccount(key_bytes)\n\n @staticmethod\n def decrypt(keyfile_json, password):\n if isinstance(keyfile_json, str):\n keyfile = json.loads(keyfile_json)\n elif is_dict(keyfile_json):\n keyfile = keyfile_json\n else:\n raise TypeError(\"The keyfile should be supplied as a JSON string, or a dictionary.\")\n password_bytes = text_if_str(to_bytes, password)\n return decode_keyfile_json(keyfile, password_bytes)\n\n @staticmethod\n def encrypt(private_key, password):\n key_bytes = HexBytes(private_key)\n password_bytes = text_if_str(to_bytes, password)\n assert len(key_bytes) == 32\n return create_keyfile_json(key_bytes, password_bytes)\n\n @staticmethod\n def hashMessage(data=None, hexstr=None, text=None):\n message_bytes = to_bytes(data, hexstr=hexstr, text=text)\n recovery_hasher = compose(HexBytes, keccak, signature_wrapper)\n return recovery_hasher(message_bytes)\n\n @combomethod\n def privateKeyToAccount(self, private_key):\n key_bytes = HexBytes(private_key)\n try:\n key_obj = self._keys.PrivateKey(key_bytes)\n return LocalAccount(key_obj, self)\n except ValidationError as original_exception:\n raise ValueError(\n \"The private key must be exactly 32 bytes long, instead of \"\n \"%d bytes.\" % len(key_bytes)\n ) from original_exception\n\n @combomethod\n def recover(self, msghash, vrs=None, signature=None):\n hash_bytes = HexBytes(msghash)\n if vrs is not None:\n v, r, s = map(hexstr_if_str(to_int), vrs)\n v_standard = to_standard_v(v)\n signature_obj = self._keys.Signature(vrs=(v_standard, r, s))\n elif signature is not None:\n signature_bytes = HexBytes(signature)\n signature_bytes_standard = to_standard_signature_bytes(signature_bytes)\n signature_obj = self._keys.Signature(signature_bytes=signature_bytes_standard)\n else:\n raise TypeError(\"You must supply the vrs tuple or the signature bytes\")\n pubkey = signature_obj.recover_public_key_from_msg_hash(hash_bytes)\n return pubkey.to_checksum_address()\n\n @combomethod\n def recoverMessage(self, data=None, hexstr=None, text=None, vrs=None, signature=None):\n msg_hash = self.hashMessage(data, hexstr=hexstr, text=text)\n return self.recover(msg_hash, vrs=vrs, signature=signature)\n\n @combomethod\n def recoverTransaction(self, serialized_transaction):\n 
txn_bytes = HexBytes(serialized_transaction)\n txn = Transaction.from_bytes(txn_bytes)\n msg_hash = hash_of_signed_transaction(txn)\n return self.recover(msg_hash, vrs=vrs_from(txn))\n\n def setKeyBackend(self, backend):\n self._keys = KeyAPI(backend)\n\n @combomethod\n def sign(self, message=None, private_key=None, message_hexstr=None, message_text=None):\n '''\n @param private_key in bytes, str, or int.\n '''\n msg_bytes = to_bytes(message, hexstr=message_hexstr, text=message_text)\n msg_hash = self.hashMessage(msg_bytes)\n key_bytes = HexBytes(private_key)\n key = self._keys.PrivateKey(key_bytes)\n (v, r, s, eth_signature_bytes) = sign_message_hash(key, msg_hash)\n return AttributeDict({\n 'message': HexBytes(msg_bytes),\n 'messageHash': msg_hash,\n 'r': r,\n 's': s,\n 'v': v,\n 'signature': HexBytes(eth_signature_bytes),\n })\n\n @combomethod\n def signTransaction(self, transaction_dict, private_key):\n '''\n @param private_key in bytes, str, or int.\n '''\n assert isinstance(transaction_dict, Mapping)\n\n account = self.privateKeyToAccount(private_key)\n\n # sign transaction\n (\n v,\n r,\n s,\n rlp_encoded,\n ) = sign_transaction_dict(account._key_obj, transaction_dict)\n\n transaction_hash = keccak(rlp_encoded)\n\n return AttributeDict({\n 'rawTransaction': HexBytes(rlp_encoded),\n 'hash': HexBytes(transaction_hash),\n 'r': r,\n 's': s,\n 'v': v,\n })\n", "sub_path": "sdk/web3.py/web3/account.py", "file_name": "account.py", "file_ext": "py", "file_size_in_byte": 5351, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "eth_keys.keys", "line_number": 55, "usage_type": "name"}, {"api_name": "web3.utils.encoding.text_if_str", "line_number": 59, "usage_type": "call"}, {"api_name": "web3.utils.encoding.to_bytes", "line_number": 59, "usage_type": "argument"}, {"api_name": "eth_utils.keccak", "line_number": 60, "usage_type": "call"}, {"api_name": "os.urandom", "line_number": 60, "usage_type": "call"}, {"api_name": "web3.utils.decorators.combomethod", "line_number": 57, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 66, "usage_type": "call"}, {"api_name": "eth_utils.is_dict", "line_number": 67, "usage_type": "call"}, {"api_name": "web3.utils.encoding.text_if_str", "line_number": 71, "usage_type": "call"}, {"api_name": "web3.utils.encoding.to_bytes", "line_number": 71, "usage_type": "argument"}, {"api_name": "eth_keyfile.decode_keyfile_json", "line_number": 72, "usage_type": "call"}, {"api_name": "web3.utils.datastructures.HexBytes", "line_number": 76, "usage_type": "call"}, {"api_name": "web3.utils.encoding.text_if_str", "line_number": 77, "usage_type": "call"}, {"api_name": "web3.utils.encoding.to_bytes", "line_number": 77, "usage_type": "argument"}, {"api_name": "eth_keyfile.create_keyfile_json", "line_number": 79, "usage_type": "call"}, {"api_name": "web3.utils.encoding.to_bytes", "line_number": 83, "usage_type": "call"}, {"api_name": "cytoolz.compose", "line_number": 84, "usage_type": "call"}, {"api_name": "web3.utils.datastructures.HexBytes", "line_number": 84, "usage_type": "argument"}, {"api_name": "eth_utils.keccak", "line_number": 84, "usage_type": "argument"}, {"api_name": "web3.utils.signing.signature_wrapper", "line_number": 84, "usage_type": "argument"}, {"api_name": "web3.utils.datastructures.HexBytes", "line_number": 89, "usage_type": "call"}, {"api_name": "web3.utils.signing.LocalAccount", "line_number": 92, "usage_type": "call"}, {"api_name": "eth_keys.exceptions.ValidationError", 
"line_number": 93, "usage_type": "name"}, {"api_name": "web3.utils.decorators.combomethod", "line_number": 87, "usage_type": "name"}, {"api_name": "web3.utils.datastructures.HexBytes", "line_number": 101, "usage_type": "call"}, {"api_name": "web3.utils.encoding.hexstr_if_str", "line_number": 103, "usage_type": "call"}, {"api_name": "web3.utils.encoding.to_int", "line_number": 103, "usage_type": "argument"}, {"api_name": "web3.utils.signing.to_standard_v", "line_number": 104, "usage_type": "call"}, {"api_name": "web3.utils.datastructures.HexBytes", "line_number": 107, "usage_type": "call"}, {"api_name": "web3.utils.signing.to_standard_signature_bytes", "line_number": 108, "usage_type": "call"}, {"api_name": "web3.utils.decorators.combomethod", "line_number": 99, "usage_type": "name"}, {"api_name": "web3.utils.decorators.combomethod", "line_number": 115, "usage_type": "name"}, {"api_name": "web3.utils.datastructures.HexBytes", "line_number": 122, "usage_type": "call"}, {"api_name": "web3.utils.transactions.Transaction.from_bytes", "line_number": 123, "usage_type": "call"}, {"api_name": "web3.utils.transactions.Transaction", "line_number": 123, "usage_type": "name"}, {"api_name": "web3.utils.signing.hash_of_signed_transaction", "line_number": 124, "usage_type": "call"}, {"api_name": "web3.utils.transactions.vrs_from", "line_number": 125, "usage_type": "call"}, {"api_name": "web3.utils.decorators.combomethod", "line_number": 120, "usage_type": "name"}, {"api_name": "eth_keys.KeyAPI", "line_number": 128, "usage_type": "call"}, {"api_name": "web3.utils.encoding.to_bytes", "line_number": 135, "usage_type": "call"}, {"api_name": "web3.utils.datastructures.HexBytes", "line_number": 137, "usage_type": "call"}, {"api_name": "web3.utils.signing.sign_message_hash", "line_number": 139, "usage_type": "call"}, {"api_name": "web3.utils.datastructures.AttributeDict", "line_number": 140, "usage_type": "call"}, {"api_name": "web3.utils.datastructures.HexBytes", "line_number": 141, "usage_type": "call"}, {"api_name": "web3.utils.datastructures.HexBytes", "line_number": 146, "usage_type": "call"}, {"api_name": "web3.utils.decorators.combomethod", "line_number": 130, "usage_type": "name"}, {"api_name": "collections.Mapping", "line_number": 154, "usage_type": "argument"}, {"api_name": "web3.utils.signing.sign_transaction_dict", "line_number": 164, "usage_type": "call"}, {"api_name": "eth_utils.keccak", "line_number": 166, "usage_type": "call"}, {"api_name": "web3.utils.datastructures.AttributeDict", "line_number": 168, "usage_type": "call"}, {"api_name": "web3.utils.datastructures.HexBytes", "line_number": 169, "usage_type": "call"}, {"api_name": "web3.utils.datastructures.HexBytes", "line_number": 170, "usage_type": "call"}, {"api_name": "web3.utils.decorators.combomethod", "line_number": 149, "usage_type": "name"}]} +{"seq_id": "282542096", "text": "\"\"\"Scrapes images from Bing's image of the day\"\"\"\nimport os\nimport urllib\nimport urllib.request\nimport json\nfrom bs4 import BeautifulSoup\n\ndef scrape():\n download_images('https://www.bing.com/HPImageArchive.aspx?format=js&idx=0&n=8&mkt=en-US', 0)\n download_images('https://www.bing.com/HPImageArchive.aspx?format=js&idx=8&n=8&mkt=en-US', 1)\n\ndef download_images(url, imageStart):\n save_location = os.path.dirname(os.path.realpath(__file__)) + \"/scrape/\"\n if not os.path.exists(save_location):\n print(\"Making image directory...\")\n os.makedirs(save_location)\n \n website = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(website, 
\"html.parser\")\n raw_json = soup.get_text()\n formatted_json = json.loads(raw_json)\n num_items = len(formatted_json['images'])\n for j in range(imageStart, num_items):\n filename = formatted_json['images'][j]['url']\n parts = filename.split(\"/\")\n if os.path.isfile(save_location + parts[4]):\n print(\"Image '\" + save_location + parts[4] + \"' already exists.\")\n else:\n print(\"Image output to: \" + save_location + parts[4])\n urllib.request.urlretrieve(\"http://www.bing.com\" + formatted_json['images'][j]['url'], save_location + parts[4])\n\ndef main():\n \"\"\"Main entry point\"\"\"\n scrape()\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "scraper.py", "file_name": "scraper.py", "file_ext": "py", "file_size_in_byte": 1366, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "os.path.dirname", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 16, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 18, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 18, "usage_type": "attribute"}, {"api_name": "bs4.BeautifulSoup", "line_number": 19, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "urllib.request.urlretrieve", "line_number": 30, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 30, "usage_type": "attribute"}]} +{"seq_id": "273267712", "text": "import sys\nimport matplotlib\nimport math\nimport scipy\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy import array\nimport glob\nfrom collections import namedtuple\nimport matplotlib.ticker as mticker\n\n# Excitation frequency and amplitude\nfa = 1.0e5\ndpa = 1.0e6\n\n# Number of Fubini harmonics in the outer loop\nNF = 20\n\n# Number Bessel summands\nNB = 20\n\n# Number of time steps per simulated time and number of simulated periods\nNtimesteps = 400\nNperiods = 2\n\n# Position divided by the shock formation distance\n# In the current implementation, the Fubini solution is singular at sigma = 0!\nsigma = 0.5\n\n# Duration of one excitation period\nTa = 1.0/fa\n\n# Vectors of dimensionless time and pressure\nfatau = []\npbydpa = []\n\n# Time loop\nfor i in range(0, Ntimesteps):\n \n # calculating time and dimensionless time\n tau = Ta*float(Nperiods)/float(Ntimesteps-1)*float(i)\n fatau.append(fa*tau)\n omegatau = 2.0*math.pi*fatau[i]\n \n # Loop over the harmonics of the Fubini solution\n # sumFT is the sum over the Fubini terms (outer loop)\n sumFT = 0.0\n for n in range(1, NF+1):\n \n coeff = 2.0/(float(n)*sigma)\n invcoeff = 1.0/coeff\n \n # Loop over the Bessel summands\n # sumBT is the sum of the Bessel summands\n sumBT = 0.0\n for m in range(0, NB+1):\n # Computing the factorials of the Bessel function\n facm = float(math.factorial(m))\n facmplusn = float(math.factorial(m+n))\n \n # Computing the Bessel summand and the sum of Bessel summands\n BT = float((-1)**m)/float(facm*facmplusn)*invcoeff**(2*m+n)\n sumBT = sumBT + BT\n \n FT = 
coeff*math.sin(float(n)*omegatau)*sumBT\n sumFT = sumFT + FT\n \n pbydpa.append(dpa*sumFT/dpa)\n\n# Plot results\nplt.plot(fatau, pbydpa)\nplt.show()", "sub_path": "Fubini.py", "file_name": "Fubini.py", "file_ext": "py", "file_size_in_byte": 1749, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "math.pi", "line_number": 43, "usage_type": "attribute"}, {"api_name": "math.factorial", "line_number": 58, "usage_type": "call"}, {"api_name": "math.factorial", "line_number": 59, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}]} +{"seq_id": "540167463", "text": "# %load q01_load_data_tfidf/build.py\n# Default imports\n\nimport pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\n# Write your solution here :\n\ndef q01_load_data_tfidf(path,max_df=0.95,min_df=2,no_features=1000):\n variable1=pd.read_csv(path)\n tf_vect=TfidfVectorizer(max_df=max_df,min_df=min_df,max_features=no_features,stop_words='english')\n variable2=tf_vect.fit_transform(variable1['talkTitle'])\n variable3=tf_vect.get_feature_names()\n return variable1,variable2,variable3\n\n# def q01_load_data_tfidf(path, max_df=0.95, min_df=2, no_features=1000):\n# dataset = pd.read_csv(path)\n# tfidf_vectorizer = TfidfVectorizer(max_df=max_df, min_df=min_df, max_features=no_features, stop_words='english')\n# tfidf = tfidf_vectorizer.fit_transform(dataset['talkTitle'])\n# tfidf_feature_names = tfidf_vectorizer.get_feature_names()\n# return dataset, tfidf, tfidf_feature_names\n\n\n\n", "sub_path": "q01_load_data_tfidf/build.py", "file_name": "build.py", "file_ext": "py", "file_size_in_byte": 926, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "pandas.read_csv", "line_number": 10, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "122334517", "text": "import json\nimport pprint\nfrom urllib.request import urlopen\n\napi_key = 'j2ZaiJfpax7zzbstijJJ'\nquery_string = '?auth_token=' + api_key\n\nrequest_data = urlopen('https://www.quandl.com/api/v1/datasets/WIKI/AAPL.json' + query_string)\nstock_prices = json.loads(request_data.read().decode())\n#pprint.pprint(stock_prices['data'][0])\nprint(stock_prices)\n\n\nfor stock_price in stock_prices['data']:\n #opening day value is [1]\n print(stock_price[0], stock_price[1])\n", "sub_path": "examples/turtle-data.py", "file_name": "turtle-data.py", "file_ext": "py", "file_size_in_byte": 461, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "urllib.request.urlopen", "line_number": 8, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "125338523", "text": "import pandas as pd\nimport numpy as np\n\nfrom enum import Enum\n\n\nclass DataWrapper(object):\n \"\"\"Class to wrap the concatenated data.\n The different Mode settings change the output of test_data and train_data.\n\n The main advantage of this wrapper is, to write the cleaning, feature engineering and modelling code\n 
using ``DataWrapper.data``, ``DataWrapper.train_data`` and ``DataWrapper.test_data``.\n For validation and submission, only the ``Mode`` has to be changed, the remaining code remains as it is.\n\n Examples\n --------\n >>> d = DataWrapper(data_tr, data_te, DataWrapper.Mode.TRAIN)\n >>> d.train_data # data_tr\n >>> d.test_data # data_te\n\n >>> # data_su is not used in VALIDATION mode\n >>> d = DataWrapper(data_tr, data_te, data_va, data_su, DataWrapper.Mode.VALIDATE)\n >>> d.train_data # data_tr + data_te\n >>> d.test_data # data_va\n\n \"\"\"\n\n class Mode(Enum):\n TRAIN = 1\n VALIDATE = 2\n SUBMIT = 3\n\n def __init__(self, train, test, validate=None, submit=None, mode=Mode.TRAIN):\n self.__mode = mode\n\n if self.__mode == self.Mode.TRAIN:\n self.test_offset = len(train)\n data_used = [train, test]\n elif self.__mode == self.Mode.VALIDATE:\n self.test_offset = len(train) + len(test)\n data_used = [train, test, validate]\n elif self.__mode == self.Mode.SUBMIT:\n self.test_offset = len(train) + len(test) + len(validate)\n data_used = [train, test, validate, submit]\n else:\n raise Exception(\"Please select a predefined Mode from DataWrapper.Mode\")\n\n self.data = pd.concat(data_used, ignore_index=True)\n\n @property\n def train_data(self):\n return self.data[0:self.test_offset]\n\n @train_data.setter\n def train_data(self, d):\n if len(d) != self.test_offset:\n raise Exception(\"Data size does not match\")\n\n self.data[0:self.test_offset] = d\n\n @property\n def test_data(self):\n return self.data[self.test_offset:]\n\n @test_data.setter\n def test_data(self, d):\n if len(d) != len(self.data) - self.test_offset:\n raise Exception(\"Data size does not match\")\n\n self.data[self.test_offset:] = d\n", "sub_path": "Instacart/AdVetter/src/data/data_wrapper.py", "file_name": "data_wrapper.py", "file_ext": "py", "file_size_in_byte": 2281, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "enum.Enum", "line_number": 28, "usage_type": "name"}, {"api_name": "pandas.concat", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "548425231", "text": "import os\nimport time\nimport requests\nimport sys\nfrom time import sleep\nimport json\n\nsys.stdout.write(\"\\x1b]2;Experience\\x07\")\nrefresh_time = 15 # seconds\ncolor = '0A' # like u would type \"color 0A\" into cmd / leave empty for default\n\n\nwith open('config.json') as f:\n js = json.load(f)\nsalad_auth = js['salad_key']\ncookie = {\n \"Salad.Authentication\": salad_auth\n}\nheaders = {\n \"User-Agent\": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Salad/0.4.2 Chrome/78.0.3904.130 Electron/7.1.9 Safari/537.36'\n}\nwith open('art.txt', encoding='utf-8') as f:\n art = f.read()\nos.system('color ' + color)\n\n\ndef main():\n while True:\n os.system('cls')\n rxp = requests.get(\n url='https://app-api.salad.io/api/v1/profile/xp', headers=headers, cookies=cookie)\n\n rxp = rxp.json()\n\n print(art)\n\n print('Experience: \\u001b[1m\\u001b[33m' +\n str(rxp['lifetimeXp']) + 'XP')\n print(' \\u001b[32m')\n print('-------------------------------------')\n\n try:\n print('Press ctrl+c to Return!')\n sleep(5)\n except KeyboardInterrupt:\n print(\"Quitting...\")\n os.system('python \"Start.py\"')\n\n\nmain()\n", "sub_path": "Salad CLI+/Salad CLI USA/XP.py", "file_name": "XP.py", "file_ext": "py", "file_size_in_byte": 1244, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": 
[{"api_name": "sys.stdout.write", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 8, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 14, "usage_type": "call"}, {"api_name": "os.system", "line_number": 24, "usage_type": "call"}, {"api_name": "os.system", "line_number": 29, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 30, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 44, "usage_type": "call"}, {"api_name": "os.system", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "236844997", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom skimage import data\nfrom skimage.feature import match_template\nfrom skimage import io\n\nimage = io.imread(\"Sprites/blobs.png\")\nblob = image[170:220, 75:130]\n\nresult = match_template(image, blob)\nij = np.unravel_index(np.argmax(result), result.shape)\nx, y = ij[::-1]\n\nfig = plt.figure(figsize=(8, 3))\nax1 = plt.subplot(1, 3, 1)\nax2 = plt.subplot(1, 3, 2, adjustable='box-forced')\nax3 = plt.subplot(1, 3, 3, sharex=ax2, sharey=ax2, adjustable='box-forced')\n\nax1.imshow(blob)\nax1.set_axis_off()\nax1.set_title('template')\n\nax2.imshow(image)\nax2.set_axis_off()\nax2.set_title('image')\n# highlight matched region\nhblob, wblob = blob.shape[:2]\nrect = plt.Rectangle((x, y), wblob, hblob, edgecolor='r', facecolor='none')\nax2.add_patch(rect)\n\nax3.imshow(result)\nax3.set_axis_off()\nax3.set_title('`match_template`\\nresult')\n# highlight matched region\nax3.autoscale(False)\nax3.plot(x, y, 'o', markeredgecolor='r', markerfacecolor='none', markersize=10)\n\nplt.show()\n", "sub_path": "vision/blob2.py", "file_name": "blob2.py", "file_ext": "py", "file_size_in_byte": 1005, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "skimage.io.imread", "line_number": 8, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 8, "usage_type": "name"}, {"api_name": "skimage.feature.match_template", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.unravel_index", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Rectangle", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}]} +{"seq_id": "36756631", "text": "\"\"\"Pytest fixtures for the Selenium-driven app\"\"\"\n\nimport pytest\nfrom .providers import DriverFactory, ConfigProvider\nfrom . 
import App\n\nTEST_CONFIG = ConfigProvider().get_config('config.txt')\nAPP_PATH = TEST_CONFIG.get_param('app_path')\n\n# @pytest.yield_fixture(scope=\"function\", params=[DriverFactory.get_chrome_driver,\n# DriverFactory.get_firefox_driver])\n@pytest.yield_fixture(scope=\"function\", params=[DriverFactory.get_chrome_driver])\ndef driver(request):\n \"\"\"driver fixture\"\"\"\n driver = request.param()\n driver.get(APP_PATH)\n yield driver\n driver.close()\n\n@pytest.fixture(scope=\"function\")\ndef app(driver):\n \"\"\"driver fixture\"\"\"\n return App(driver)", "sub_path": "PythonFlow_March2018/Examples/_09_Python_Acceptance_testing_Selenium_OOP_in_tests/conftest.py", "file_name": "conftest.py", "file_ext": "py", "file_size_in_byte": 713, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "providers.ConfigProvider", "line_number": 7, "usage_type": "call"}, {"api_name": "pytest.yield_fixture", "line_number": 12, "usage_type": "call"}, {"api_name": "providers.DriverFactory.get_chrome_driver", "line_number": 12, "usage_type": "attribute"}, {"api_name": "providers.DriverFactory", "line_number": 12, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "107387702", "text": "from __future__ import print_function\n\nimport glob\nimport math\nimport os\n\nfrom IPython import display\nfrom matplotlib import cm\nfrom matplotlib import gridspec\nfrom matplotlib import pyplot as plt\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nfrom sklearn import metrics\nimport tensorflow as tf\nfrom tensorflow.python.data import Dataset\n\n\ntf.logging.set_verbosity(tf.logging.ERROR)\npd.options.display.max_rows = 10\npd.options.display.float_format = '{:.1f}'.format\n\nmnist_dataframe = pd.read_csv(\n \"mnist_train_small.csv\",\n sep=\",\",\n header=None\n)\n\n# Use just the first 10000 records for training/validation\nmnist_dataframe = mnist_dataframe.head(10000)\nmnist_dataframe = mnist_dataframe.reindex(np.random.permutation(mnist_dataframe.index))\n# print(mnist_dataframe.head())\n\n\ndef parse_labels_and_features(dataset):\n \"\"\"\n Extracts labels and features\n This is a good place to scale or transform the features if needed\n :param dataset: A Pandas 'Dataframe', containing the label on the first column\n and monochrome pixel values on the remaining columns, in row major order\n :return: A 'tuple' ‘(labels, features)’\n labels: A Pandas 'Series' features: A Pandas 'Dataframe'\n \"\"\"\n labels = dataset[0]\n # Dataframe.loc index ranges are inclusive at both ends\n features = dataset.loc[:, 1:784]\n\n # Scale the data to [0, 1] by dividing out the max value, 255\n features = features / 255\n\n return labels, features\n\n\ntraining_targets, training_examples = parse_labels_and_features(mnist_dataframe[:7500])\n# print(training_examples.describe())\n\nvalidation_targets, validation_examples = parse_labels_and_features(mnist_dataframe[7500:10000])\n# print(validation_examples.describe())\n\n\nrand_example = np.random.choice(training_examples.index)\n_, ax = plt.subplots()\nax.matshow(training_examples.loc[rand_example].values.reshape(28, 28))\nax.set_title(\"Label: %i\" % training_targets.loc[rand_example])\nax.grid(False)\nplt.show()\n\n\ndef construct_feature_columns():\n \"\"\"\n Construct the TensorFlow Feature Columns\n :return: A set of feature columns\n \"\"\"\n # There are 784 pixels in each image\n return 
set([tf.feature_column.numeric_column('pixels', shape=784)])\n\n\ndef create_training_input_fn(features, labels, batch_size, num_epochs=None, shuffle=True):\n    \"\"\"\n    A custom input_fn for sending MNIST data to the estimator for training\n\n    :param features: The training features\n    :param labels: The training labels\n    :param batch_size: Batch size to use during training\n    :param num_epochs:\n    :param shuffle:\n    :return: A function that returns batches of training features and labels\n    during training\n    \"\"\"\n    def _input_fn(num_epochs=None, shuffle=True):\n        # Input pipelines are reset with each call to .train(). To ensure model\n        # gets a good sampling of data, even when number of steps is small,\n        # we shuffle all the data before creating the Dataset object\n        idx = np.random.permutation(features.index)\n        raw_features = {\"pixels\": features.reindex(idx)}\n        raw_targets = np.array(labels[idx])\n\n        ds = Dataset.from_tensor_slices((raw_features, raw_targets))\n        ds = ds.batch(batch_size).repeat(num_epochs)\n\n        if shuffle:\n            ds = ds.shuffle(10000)\n\n        feature_batch, label_batch = ds.make_one_shot_iterator().get_next()\n\n        return feature_batch, label_batch\n\n    return _input_fn\n\n\ndef create_predict_input_fn(features, labels, batch_size):\n    \"\"\"\n    A custom input_fn for sending MNIST data to the estimator for predictions\n    :param features: The features to base predictions on\n    :param labels: The labels of the prediction examples\n    :param batch_size:\n    :return: A function that returns features and labels for predictions\n    \"\"\"\n\n    def _input_fn():\n        raw_features = {\"pixels\": features.values}\n        raw_targets = np.array(labels)\n\n        ds = Dataset.from_tensor_slices((raw_features, raw_targets))\n        ds = ds.batch(batch_size)\n\n        feature_batch, label_batch = ds.make_one_shot_iterator().get_next()\n\n        return feature_batch, label_batch\n\n    return _input_fn\n\n\ndef train_linear_classification_model(\n    learning_rate,\n    steps,\n    batch_size,\n    training_examples,\n    training_targets,\n    validation_examples,\n    validation_targets\n):\n    \"\"\"\n    Trains a linear classification model for the MNIST digits dataset\n\n    In addition to training, this function also prints training progress information,\n    a plot of the training and validation loss over time, and a confusion matrix\n    :param learning_rate: A 'float', the learning rate to use\n    :param steps: A non-zero 'int', the total number of training steps\n    A training step consists of a forward and backward pass using a single batch\n    :param batch_size:\n    :param training_examples: Training features\n    :param training_targets: Training labels\n    :param validation_examples: Validation features\n    :param validation_targets: Validation labels\n\n    :return: The trained 'LinearClassifier' object\n    \"\"\"\n\n    periods = 10\n    steps_per_period = steps / periods\n\n    # Create the input functions\n    predict_training_input_fn = create_predict_input_fn(training_examples, training_targets, batch_size)\n    predict_validation_input_fn = create_predict_input_fn(validation_examples, validation_targets, batch_size)\n    training_input_fn = create_training_input_fn(training_examples, training_targets, batch_size)\n\n    # Create a LinearClassifier object\n    my_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)\n    my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)\n    classifier = tf.estimator.LinearClassifier(\n        feature_columns=construct_feature_columns(),\n        n_classes=10,\n        optimizer=my_optimizer,\n        config=tf.estimator.RunConfig(keep_checkpoint_max=1)\n    )\n\n    
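# A quick sanity check of the input pipeline (an illustrative sketch only,\n    # assuming the shapes prepared above; it is not required for training):\n    #   fn = create_training_input_fn(training_examples, training_targets, 10)\n    #   features, labels = fn()  # features['pixels']: [10, 784], labels: [10]\n\n    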
# Train the model, but do so inside a loop so that we can periodically assess\n # loss metrics\n print(\"Training model...\")\n print(\"LogLoss error (on validation data):\")\n training_errors = []\n validation_errors = []\n for period in range(0, periods):\n # Train the model, starting from the prior state\n classifier.train(\n input_fn=training_input_fn,\n steps=steps_per_period\n )\n\n # Take a break and compute probabilities\n\n\n\n\n\n\n\n", "sub_path": "TensorFlow/tfBoy/multi-class_classification_of_handwritten_digits.py", "file_name": "multi-class_classification_of_handwritten_digits.py", "file_ext": "py", "file_size_in_byte": 6457, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "tensorflow.logging.set_verbosity", "line_number": 21, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pandas.options", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pandas.options", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.random.permutation", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 33, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 63, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "tensorflow.feature_column.numeric_column", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.feature_column", "line_number": 77, "usage_type": "attribute"}, {"api_name": "numpy.random.permutation", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 96, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 98, "usage_type": "call"}, {"api_name": "tensorflow.python.data.Dataset.from_tensor_slices", "line_number": 100, "usage_type": "call"}, {"api_name": "tensorflow.python.data.Dataset", "line_number": 100, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 124, "usage_type": "call"}, {"api_name": "tensorflow.python.data.Dataset.from_tensor_slices", "line_number": 126, "usage_type": "call"}, {"api_name": "tensorflow.python.data.Dataset", "line_number": 126, "usage_type": "name"}, {"api_name": "tensorflow.train.AdagradOptimizer", "line_number": 171, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 171, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.estimator.clip_gradients_by_norm", "line_number": 172, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 172, "usage_type": "attribute"}, {"api_name": "tensorflow.estimator.LinearClassifier", "line_number": 173, "usage_type": "call"}, {"api_name": "tensorflow.estimator", "line_number": 173, "usage_type": "attribute"}, {"api_name": "tensorflow.estimator.RunConfig", "line_number": 177, "usage_type": "call"}, {"api_name": "tensorflow.estimator", "line_number": 177, "usage_type": "attribute"}]} +{"seq_id": "255640597", "text": "'''\nCopyright (c) 2014 Adam Giermanowski\n\nPermission is hereby granted, free of charge, to any person obtaining a 
copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n'''\n\nimport os\nimport csv\nimport urllib2\nfrom xml.dom import minidom\n\n\ndef get_stock_names():\n    \"\"\" Returns a dictionary with stock names and IDs\n    :return: dictionary (name: stockID).\n    \"\"\"\n    try:\n        page = urllib2.urlopen(\"http://finanse.wp.pl/isin,PLOPTTC00019,stocks.xml\")\n        dom = minidom.parse(page)\n        stocks = dom.getElementsByTagName('item')\n    except:\n        raise Exception('Could not parse stocks from: http://finanse.wp.pl/isin,PLOPTTC00019,stocks.xml')\n\n    names = [str(name.getAttribute('name')) for name in stocks]\n    ids = [str(name.getAttribute('value')) for name in stocks]\n    data = dict(zip(names, ids))\n\n    # remove trash data\n    for name in data.keys():\n        digit_counter = list(name)\n        digit_counter = filter(lambda x: x in '1234567890', digit_counter)\n        num_digits = len(digit_counter)\n        if num_digits >= 3:\n            data.pop(name)\n\n    return data\n\n\ndef get_stock_names_csv():\n    \"\"\" Saves a list of stock names to csv\n    \"\"\"\n\n    data = get_stock_names()\n\n    try:\n        os.remove('names.csv')\n    except:\n        pass\n\n    with open('names.csv', 'w') as f:\n        csv_writer = csv.writer(f)\n        csv_writer.writerows(data.items())  # one (name, id) row per stock\n\n\ndef get_fund_names():\n    \"\"\" Returns 2 lists - fund names and fund IDs\n    \"\"\"\n    page = urllib2.urlopen(\"http://finanse.wp.pl/fundslist.xml\")\n    dom = minidom.parse(page)\n    funds = dom.getElementsByTagName('item')\n\n    names = [str(fund.getAttribute('name').encode('utf-8')) for fund in funds]\n    ids = [str(fund.getAttribute('value').encode('utf-8')) for fund in funds]\n\n    return names, ids\n\n\ndef get_fund_names_csv():\n    \"\"\" Saves a list of fund names to csv\n    \"\"\"\n    names, ids = get_fund_names()\n\n    try:\n        os.remove('funds.csv')\n    except:\n        pass\n\n    lists = zip(names, ids)\n    with open('funds.csv', 'w') as file:\n        csv_writer = csv.writer(file)\n        csv_writer.writerows(lists)\n", "sub_path": "get_names.py", "file_name": "get_names.py", "file_ext": "py", "file_size_in_byte": 3004, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "urllib2.urlopen", "line_number": 34, "usage_type": "call"}, {"api_name": "xml.dom.minidom.parse", "line_number": 35, "usage_type": "call"}, {"api_name": 
"xml.dom.minidom", "line_number": 75, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 90, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "341377727", "text": "from pydub import AudioSegment\nfrom datetime import datetime, timedelta\nimport os\n\nbase_dir = \"Konvertiert\\\\\"\nexport_dir = base_dir + \"Merged\\\\\"\n\ndef export_audiofile(audio_file, date_start, suffix):\n print(\"Export \" + str(date_start))\n date_base = date_start.replace(minute=0, second=0)\n base_audio = AudioSegment.silent(duration=(date_start-date_base).total_seconds()*1000)\n full_audio = base_audio + audio_file\n itr_sec = 0\n while itr_sec < full_audio.duration_seconds:\n end_sec = itr_sec + 60 * 60\n part_audio = full_audio[itr_sec*1000:end_sec*1000]\n part_date = date_base + timedelta(seconds=itr_sec)\n part_file = export_dir + part_date.strftime('%Y-%m-%d %H-%M') + suffix\n if not os.path.isfile(part_file):\n print(\"Write \" + part_file)\n part_audio.export(part_file)\n itr_sec = end_sec\n\n\nstarted = False\n\nfor file in os.listdir(base_dir):\n filename = os.fsdecode(file)\n if filename.endswith(\".WAV\"): \n date_str = filename[:14]\n date_start = datetime.strptime(date_str, '%Y%m%d%H%M%S')\n song = AudioSegment.from_wav(base_dir + filename)\n dur_s = song.duration_seconds\n date_end = date_start + timedelta(seconds=dur_s) \n \n if not started: \n audio_file = song\n track_start = date_start\n track_suffix = filename[14:]\n started = True\n else:\n diff_to_prev = (date_start - prev_end).total_seconds()\n \n if diff_to_prev < 3600:\n between = AudioSegment.silent(duration=diff_to_prev*1000)\n audio_file = audio_file + between + song \n else:\n export_audiofile(audio_file, track_start, track_suffix)\n audio_file = song\n track_start = date_start\n track_suffix = filename[14:]\n prev_end = date_end\nexport_audiofile(audio_file, track_start, track_suffix)\n\n\n", "sub_path": "audio-proc/proc.py", "file_name": "proc.py", "file_ext": "py", "file_size_in_byte": 2015, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "pydub.AudioSegment.silent", "line_number": 11, "usage_type": "call"}, {"api_name": "pydub.AudioSegment", "line_number": 11, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 27, "usage_type": "call"}, {"api_name": "os.fsdecode", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 31, "usage_type": "name"}, {"api_name": "pydub.AudioSegment.from_wav", "line_number": 32, "usage_type": "call"}, {"api_name": "pydub.AudioSegment", "line_number": 32, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 34, "usage_type": "call"}, {"api_name": "pydub.AudioSegment.silent", "line_number": 45, "usage_type": "call"}, {"api_name": "pydub.AudioSegment", "line_number": 45, "usage_type": "name"}]} +{"seq_id": "167769661", "text": "import pandas as pd\nimport Helpfunctions as hf\nimport VisualizationFunctions as vf\nimport matplotlib.pyplot as plt\n\nfrom pandas.tools.plotting import scatter_matrix\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.linear_model import LinearRegression, 
LogisticRegression\nfrom sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report, confusion_matrix, log_loss, roc_curve\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.tree import DecisionTreeClassifier\n\nclass Titanic_data:\n labelled_data_path = \"\"\n df_labelled_original = pd.DataFrame()\n df_labelled = pd.DataFrame()\n\n unlabelled_data_path = \"\"\n df_unlabelled_original = pd.DataFrame()\n df_unlabelled = pd.DataFrame()\n\n output_path = \"\"\n\n #classifier = AdaBoostClassifier(n_estimators=3)\n classifier = LogisticRegression(C=2.75)\n\n def __init__(self, labelled_data_path, unlabelled_data_path, output_path):\n self.labelled_data_path = labelled_data_path\n self.unlabelled_data_path = unlabelled_data_path\n self.output_path = output_path\n\n self.df_labelled_original = pd.read_csv(labelled_data_path, na_values=['NaN', '?'])\n self.df_unlabelled_original = pd.read_csv(unlabelled_data_path, na_values=['NaN', '?'])\n\n def encode_features(self):\n self.df_labelled = self.df_labelled_original.copy(deep=True)\n self.df_unlabelled = self.df_unlabelled_original.copy(deep=True)\n\n for df in [self.df_labelled, self.df_unlabelled]:\n titles = []\n surnames = []\n for name in df['Name']:\n names = name.split(\",\")\n surname = names[0]\n surnames.append(surname)\n title_and_first_name = names[1].split(\".\")\n title = title_and_first_name[0].replace(\" \", \"\")\n if (title != \"Mr\")\\\n and (title != \"Mrs\")\\\n and (title != \"Miss\")\\\n and (title != \"Master\")\\\n and (title != \"Dr\"):\n title = \"Rare\"\n titles.append(title)\n\n title_series = pd.Series(titles)\n surname_series = pd.Series(surnames)\n\n df['Title'] = title_series\n df['Surname'] = surname_series\n\n df.drop(\"Name\", axis=1, inplace=True)\n df.drop(\"Cabin\", axis=1, inplace=True)\n df.drop(\"Ticket\", axis=1, inplace=True)\n\n df[\"Familysize\"] = df[\"Parch\"] + df[\"SibSp\"]\n df[\"Isalone\"] = (df[\"Familysize\"] == 0)*1\n\n df['Embarked'] = df['Embarked'].factorize()[0] # get rid of NaN\n le = LabelEncoder()\n\n df[\"Sex\"] = le.fit_transform(df[\"Sex\"])\n df[\"Title\"] = le.fit_transform(df[\"Title\"])\n df[\"Surname\"] = le.fit_transform(df[\"Surname\"])\n df[\"Isalone\"] = le.fit_transform(df[\"Isalone\"])\n df[\"Embarked\"] = le.fit_transform(df[\"Embarked\"])\n #hf.encode_text_dummy(df, \"Embarked\")\n\n\n\n return\n\n def impute_features(self, plot_before_and_after=True):\n f = \"\"\n ax1 = \"\"\n ax2 = \"\"\n ax3 = \"\"\n ax4 = \"\"\n if plot_before_and_after:\n f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)\n plt.suptitle(\"Data density, imputation\")\n ax1.set_title(\"Labelled data (Age)\")\n ax2.set_title(\"Unlabelled data (Age)\")\n ax3.set_title(\"Labelled data (Fare)\")\n ax4.set_title(\"Unlabelled data (Fare)\")\n\n self.df_labelled[\"Age\"].plot.density(ax=ax1, label=\"Pre (Age)\")\n self.df_unlabelled[\"Age\"].plot.density(ax=ax2, label=\"Pre (Age)\")\n self.df_labelled[\"Fare\"].plot.density(ax=ax3, label=\"Pre (Fare)\")\n self.df_unlabelled[\"Fare\"].plot.density(ax=ax4, label=\"Pre (Fare)\")\n\n self.df_labelled = self.impute_fare(self.df_labelled)\n self.df_unlabelled = self.impute_fare(self.df_unlabelled)\n\n self.df_labelled = self.impute_age(self.df_labelled)\n self.df_unlabelled = self.impute_age(self.df_unlabelled)\n\n # imputer = skmice.MiceImputer()\n # X, specs = imputer.transform(df)\n\n if 
plot_before_and_after:\n            self.df_labelled[\"Age\"].plot.density(ax=ax1, label=\"Post (Age)\")\n            self.df_unlabelled[\"Age\"].plot.density(ax=ax2, label=\"Post (Age)\")\n            self.df_labelled[\"Fare\"].plot.density(ax=ax3, label=\"Post (Fare)\")\n            self.df_unlabelled[\"Fare\"].plot.density(ax=ax4, label=\"Post (Fare)\")\n            ax1.legend(loc=\"upper right\")\n            ax2.legend(loc=\"upper right\")\n            ax3.legend(loc=\"upper right\")\n            ax4.legend(loc=\"upper right\")\n            plt.show()\n\n        self.df_labelled = self.bin_age_and_fare(self.df_labelled)\n        self.df_unlabelled = self.bin_age_and_fare(self.df_unlabelled)\n        return\n\n    def bin_age_and_fare(self, dataset):\n        dataset.loc[dataset['Fare'] <= 7.91, 'Fare'] = 0\n        dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1\n        dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2\n        dataset.loc[dataset['Fare'] > 31, 'Fare'] = 3\n        dataset['Fare'] = dataset['Fare'].astype(int)\n\n        # Mapping Age\n        dataset.loc[dataset['Age'] <= 16, 'Age'] = 0\n        dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1\n        dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2\n        dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3\n        dataset.loc[dataset['Age'] > 64, 'Age'] = 4\n\n        return dataset\n\n    def impute_age(self, dataframe):\n        df_is_not_nan = dataframe[dataframe[\"Age\"].notnull()]\n        X, y = hf.to_xy(df_is_not_nan, \"Age\")\n        lr = LinearRegression()\n        lr.fit(X, y)\n\n        df_is_nan = dataframe[dataframe[\"Age\"].isnull()]\n        df_is_nan = df_is_nan.drop(\"Age\", axis=1)\n        age_pred = lr.predict(df_is_nan)\n        df_is_nan[\"Age\"] = age_pred\n        dataframe = pd.concat([df_is_nan, df_is_not_nan])\n        return dataframe\n        #dataframe[\"Age\"] = dataframe[\"Age\"].fillna(dataframe[\"Age\"].mean())\n\n\n    def impute_fare(self, dataframe):\n        dataframe[\"Fare\"] = dataframe[\"Fare\"].fillna(dataframe[\"Fare\"].median())\n        return dataframe\n\n    def plot_scatter_matrix(self):\n        scatter_matrix(self.df_labelled, diagonal=\"kde\")\n        plt.show()\n        scatter_matrix(self.df_unlabelled, diagonal=\"kde\")\n        plt.show()\n\n    def describe_original_data(self):\n        print(\"Training data set description:\")\n        print(self.df_labelled_original.info())\n        print(self.df_labelled_original.describe())\n        print(\"Submission data set description:\")\n        print(self.df_unlabelled_original.info())\n        print(self.df_unlabelled_original.describe())\n\n    def describe_prepared_data(self):\n        print(\"Training data set description:\")\n        print(self.df_labelled.info())\n        print(self.df_labelled.describe())\n        print(\"Submission data set description:\")\n        print(self.df_unlabelled.info())\n        print(self.df_unlabelled.describe())\n\n    def fit_classifier(self, print_report=True):\n        X, y = hf.to_xy(self.df_labelled.drop('PassengerId', axis=1), \"Survived\")\n        X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.75, random_state=42)\n        self.classifier.fit(X_train, y_train)\n        y_pred = self.classifier.predict(X_test)\n        print(classification_report(y_test, y_pred, target_names=[\"Deceased\", \"Survived\"]))\n\n    def predict_unlabelled_data(self):\n        self.df_unlabelled[\"Survived\"] = \\\n            self.classifier.predict(self.df_unlabelled.drop('PassengerId', axis=1))\n\n        self.df_unlabelled[['PassengerId', 'Survived']]\\\n            .to_csv(self.output_path, index=False, sep=\",\")\n\n    def test_different_settings(self):\n\n        X, y = hf.to_xy(self.df_labelled.drop('PassengerId', axis=1), \"Survived\")\n        X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.25, random_state=42)\n\n        
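# Sweep the inverse regularisation strength C of LogisticRegression: smaller\n        # C means a stronger L2 penalty, so the train/test score curves over the\n        # grid below expose under- versus over-regularisation.\n        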
#testparameters = [2, 2.25, 2.5, 2.75, 3, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, 5, 5.25, 5.5, 5.75, 6]\n        testparameters = range(1,300)\n\n        train_error = list()\n        test_error = list()\n        log_losses = list()\n\n        plt.figure()\n\n        for testparameter in testparameters:\n            classifier = LogisticRegression(C=testparameter)\n            classifier.fit(X_train, y_train)\n            y_pred = classifier.predict(X_test)\n            y_proba = classifier.predict_proba(X_test)\n            train_error.append(classifier.score(X_train, y_train))\n            test_error.append(classifier.score(X_test, y_test))\n            log_losses.append(log_loss(y_test, y_proba))\n            #print(\"\\nTestparameter: \" + str(testparameter))\n            #print(confusion_matrix(y_test, y_pred))\n            #print(log_loss(y_test, y_proba))\n\n\n        plt.semilogx(testparameters, train_error, label=\"train\")\n        plt.semilogx(testparameters, test_error, label=\"test\")\n        plt.legend(loc='lower left')\n        plt.show()\n\n        plt.plot(testparameters, log_losses)\n        plt.show()\n", "sub_path": "Titanic_data.py", "file_name": "Titanic_data.py", "file_ext": "py", "file_size_in_byte": 9351, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "pandas.DataFrame", "line_number": 18, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 23, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 28, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 35, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 59, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 60, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.suptitle", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "Helpfunctions.to_xy", "line_number": 147, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 148, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 155, "usage_type": "call"}, {"api_name": "pandas.tools.plotting.scatter_matrix", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 166, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 166, "usage_type": "name"}, {"api_name": "pandas.tools.plotting.scatter_matrix", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 168, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 168, "usage_type": "name"}, {"api_name": "Helpfunctions.to_xy", "line_number": 187, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 188, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 191, "usage_type": "call"}, {"api_name": "Helpfunctions.to_xy", "line_number": 202, "usage_type": "call"}, {"api_name": 
"sklearn.model_selection.train_test_split", "line_number": 203, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 212, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 212, "usage_type": "name"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 215, "usage_type": "call"}, {"api_name": "sklearn.metrics.log_loss", "line_number": 221, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.semilogx", "line_number": 227, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 227, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.semilogx", "line_number": 228, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 228, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 229, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 229, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 230, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 230, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 232, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 232, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 233, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 233, "usage_type": "name"}]} +{"seq_id": "13581861", "text": "\"\"\"\nMetadata loading\n\"\"\"\nfrom pathlib import Path\nfrom typing import Any, List, Union, TYPE_CHECKING\n\nfrom .constants import (\n ALBUMART_SUPPORTED_FILENAMES,\n BOOKLET_SUPPORTED_FILENAMES,\n)\nfrom ..exceptions import MetadataError\nfrom .formats.albumart import AlbumArt\nfrom .formats.booklet import Booklet\n\nif TYPE_CHECKING:\n from ..configuration import Configuration\n from ..library.album import Album\n from ..library.tree import LibraryItem\n\n\nclass AlbumMetadata:\n \"\"\"\n Metadata loader for album objects\n \"\"\"\n album: 'Album'\n albumart: List[AlbumArt]\n booklets: List[Booklet]\n\n def __init__(self, album: 'Album') -> None:\n self.album = album\n self.albumart = []\n self.booklets = []\n\n @property\n def config(self) -> 'Configuration':\n \"\"\"\n Return configuration via the Album's library object\n \"\"\"\n return self.album.library.config\n\n def debug(self, *args: List[Any]) -> None:\n \"\"\"\n Send debug message to stderr if debug mode is enabled\n \"\"\"\n self.album.debug(*args)\n\n def error(self, *args: List[Any]) -> None:\n \"\"\"\n Send error message to stderr\n \"\"\"\n self.album.error(*args)\n\n def message(self, *args: List[Any]) -> None:\n \"\"\"\n Show message to stdout unless silent flag is set\n \"\"\"\n self.album.message(*args)\n\n def add_metadata_file(self, metadata_file: Union['LibraryItem', Path]) -> 'LibraryItem':\n \"\"\"\n Add a metadata file to the album\n \"\"\"\n kwargs = {\n 'config': self.config,\n 'library': self.album.library,\n 'album': self.album,\n 'path': metadata_file,\n }\n if metadata_file.name in ALBUMART_SUPPORTED_FILENAMES:\n albumart = AlbumArt(**kwargs)\n self.albumart.append(albumart)\n return albumart\n if metadata_file.name in BOOKLET_SUPPORTED_FILENAMES:\n booklet = Booklet(**kwargs)\n self.booklets.append(booklet)\n return booklet\n raise MetadataError(f'Unknown file type: {metadata_file}')\n", "sub_path": "oodi/metadata/loader.py", "file_name": "loader.py", "file_ext": "py", "file_size_in_byte": 2141, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", 
"api": [{"api_name": "typing.TYPE_CHECKING", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 26, "usage_type": "name"}, {"api_name": "formats.albumart.AlbumArt", "line_number": 26, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 27, "usage_type": "name"}, {"api_name": "formats.booklet.Booklet", "line_number": 27, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 47, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 47, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 53, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 53, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 59, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 59, "usage_type": "name"}, {"api_name": "constants.ALBUMART_SUPPORTED_FILENAMES", "line_number": 69, "usage_type": "name"}, {"api_name": "formats.albumart.AlbumArt", "line_number": 70, "usage_type": "call"}, {"api_name": "constants.BOOKLET_SUPPORTED_FILENAMES", "line_number": 73, "usage_type": "name"}, {"api_name": "formats.booklet.Booklet", "line_number": 74, "usage_type": "call"}, {"api_name": "exceptions.MetadataError", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "321545312", "text": "from django.shortcuts import render, redirect, reverse\nfrom fundstrackerapp.models import FinancialGoal\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nimport datetime\n\n\n@login_required\ndef journal_entry_form(request):\n\n if request.method == 'GET':\n\n # loops through all incompleted financial goals and\n # filters out the current goals vs goals that have expired,\n # then passing all those current and incomplete financial \n # goals for the logged-in user into the form template to \n # display them in a dropdown menu to attach entry to a goal\n\n incomplete_financial_goals = FinancialGoal.objects.filter(user=request.user.id, is_completed=0)\n current_goals = []\n past_goals = []\n \n for goal in incomplete_financial_goals:\n goal_date_str = str(goal.created_at) \n exp_year = int(goal_date_str.split('-')[0])\n exp_month = int(goal_date_str.split('-')[1]) + goal.timeframe\n if exp_month > 12:\n exp_year += 1\n exp_month -= 12\n exp_day_str = goal_date_str.split('-')[2]\n exp_day = int(exp_day_str.split()[0])\n\n curr_date = str(datetime.datetime.now())\n curr_date_str = curr_date.split()[0]\n curr_year = int(curr_date_str.split('-')[0])\n curr_month = int(curr_date_str.split('-')[1])\n curr_day = int(curr_date_str.split('-')[2])\n\n if exp_year < curr_year:\n past_goals.append(goal)\n elif exp_year == curr_year and exp_month < curr_month:\n past_goals.append(goal)\n elif exp_year == curr_year and exp_month == curr_month and exp_day < curr_day:\n past_goals.append(goal)\n\n for goal in incomplete_financial_goals:\n if goal not in past_goals:\n current_goals.append(goal)\n\n\n template = 'journal/form.html'\n context = {\n 'current_goals': current_goals\n }\n\n return render(request, template, context)", "sub_path": "fundstrackerproject/fundstrackerapp/views/journal/form.py", "file_name": "form.py", "file_ext": "py", "file_size_in_byte": 2111, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "fundstrackerapp.models.FinancialGoal.objects.filter", 
"line_number": 19, "usage_type": "call"}, {"api_name": "fundstrackerapp.models.FinancialGoal.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "fundstrackerapp.models.FinancialGoal", "line_number": 19, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 33, "usage_type": "attribute"}, {"api_name": "django.shortcuts.render", "line_number": 56, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 8, "usage_type": "name"}]} +{"seq_id": "215396879", "text": "import requests\nfrom bs4 import BeautifulSoup as bs\nimport pandas as pd\n\n\nclass Spider(object):\n headers = {\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'\n }\n web_site ='https://bbs2.ongacg.com/'\n comic_save_path = ''\n chapter_save_path = ''\n\n # 获取网页信息\n def get_html(self, url):\n html = requests.get(url, headers=self.headers, timeout=5000)\n html.encoding = html.apparent_encoding # 'utf8'\n html = html.text\n html = html.encode('gbk', \"ignore\").decode('gbk') # 先用gbk编码,忽略掉非法字符,然后再译码\n html = html.encode('utf-8').decode('utf-8')\n soup = bs(html, 'lxml')\n return soup\n\n def init_spider(self, web_url, page_num):\n post_list = []\n csv_title = ['标题', '内容简介', '下载链接', '提取码', '解压密码']\n\n for index in range(22, page_num):\n post_list = []\n url = web_url.format(str(index))\n print(url)\n page = self.get_html(url)\n # print(page)\n list_container = page.find('table', {'id': 'threadlisttableid'})\n # print(list_container)\n comic_list = list_container.find_all('tbody')\n # print(comic_list)\n for comic in comic_list:\n # print(comic)\n comic_info = comic.find('a', {'class': 's xst'})\n if comic_info:\n # 漫画标题\n comic_title = comic_info.text\n print(comic_title)\n # 漫画详情\n comic_content = ''\n # # 漫画作者\n # author_list = ''\n # # 漫画详情页地址\n comic_url = self.web_site + comic_info['href']\n comic_html = self.get_html(comic_url)\n # 漫画简介\n comic_content = comic_html.find('td', {'class': 't_f'})\n\n # 缓存这一条文章的全部信息,以备保存到CSV\n post_list.append([comic_title, comic_content, '', '', ''])\n\n post_data = pd.DataFrame(columns=csv_title, data=post_list)\n if index == 1:\n post_data.to_csv('ACG次元网列表2.csv', encoding='UTF-8')\n else:\n post_data.to_csv('ACG次元网列表2.csv', mode='a', header=False, encoding='UTF-8')\n\n\n\nurl = 'https://bbs2.ongacg.com/forum-37-{}.html'\nspider = Spider()\nspider.init_spider(url, 97)\n", "sub_path": "51绅士资源吧/ACG次元网.py", "file_name": "ACG次元网.py", "file_ext": "py", "file_size_in_byte": 2614, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "requests.get", "line_number": 16, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 21, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "485033635", "text": "#!https://realpython.com/introduction-to-mongodb-and-python/\nimport pymongo\nclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n\n\ndb = client['pymongo_test']\nposts = db.posts\n\nname= raw_input(\"What is your name?\")\nfather_name= raw_input(\"What is your father name?\")\n\npost_data = {'Name': name, 'father_name': father_name}\n\n\nresult = posts.insert_one(post_data)\n\n\nimport pymongo\n\nmyclient = pymongo.MongoClient()\ndb = myclient[\"pymongo_test\"]\nposts = db.posts\n\nfor x in posts.find():\n print(x) \n", 
"sub_path": "pymongo.py", "file_name": "pymongo.py", "file_ext": "py", "file_size_in_byte": 502, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "pymongo.MongoClient", "line_number": 3, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "190229063", "text": "\"\"\"empty message\n\nRevision ID: 70fc4a497980\nRevises: 2591f538e592\nCreate Date: 2019-01-15 19:09:27.361461\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = '70fc4a497980'\ndown_revision = '2591f538e592'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index('ix_business_connection_dm_connection_id', table_name='business_connection')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_index('ix_business_connection_dm_connection_id', 'business_connection', ['dm_connection_id'], unique=True)\n # ### end Alembic commands ###\n", "sub_path": "backend-master/src/business/main/migrations/versions/70fc4a497980_.py", "file_name": "70fc4a497980_.py", "file_ext": "py", "file_size_in_byte": 788, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "alembic.op.drop_index", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "alembic.op.create_index", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "374335819", "text": "#!/usr/bin/python\n# Classification (U)\n\n\"\"\"Program: rabbitmqpub_setqueue.py\n\n Description: Unit test of rabbitmqpub.set_queue in rabbitmq_class.py.\n\n Usage:\n test/unit/rabbitmq_class/rabbitmqpub_setqueue.py\n\n Arguments:\n\n\"\"\"\n\n# Libraries and Global Variables\n\n# Standard\nimport sys\nimport os\n\nif sys.version_info < (2, 7):\n import unittest2 as unittest\nelse:\n import unittest\n\n# Third-party\nimport mock\n\n# Local\nsys.path.append(os.getcwd())\nimport rabbitmq_class\nimport version\n\n__version__ = version.__version__\n\n\nclass UnitTest(unittest.TestCase):\n\n \"\"\"Class: UnitTest\n\n Description: Class which is a representation of a unit testing.\n\n Methods:\n setUp\n test_setup_queue\n\n \"\"\"\n\n def setUp(self):\n\n \"\"\"Function: setUp\n\n Description: Initialization for unit testing.\n\n Arguments:\n\n \"\"\"\n\n self.name = None\n self.host = \"ServerName\"\n self.port = 5555\n self.connection = None\n self.exchange_name = \"Exchange_Name\"\n self.queue_name = \"Queue_Name\"\n self.routing_key = \"Route_Key\"\n self.auto_delete = True\n\n @mock.patch(\"rabbitmq_class.RabbitMQPub.check_confirm\")\n @mock.patch(\"rabbitmq_class.RabbitMQPub.bind_queue\")\n @mock.patch(\"rabbitmq_class.RabbitMQPub.create_queue\")\n @mock.patch(\"rabbitmq_class.RabbitMQPub.setup_exchange\")\n @mock.patch(\"rabbitmq_class.pika\")\n def test_setup_queue(self, mock_pika, mock_setup, mock_create, mock_bind,\n mock_check):\n\n \"\"\"Function: test_setup_queue\n\n Description: Test setup_queue method.\n\n Arguments:\n\n \"\"\"\n\n mock_pika.PlainCredentials.return_value = \"PlainCredentials\"\n mock_pika.ConnectionParameters.return_value = \"ConnectionParameters\"\n mock_setup.return_value = True\n 
mock_create.return_value = True\n mock_bind.return_value = True\n mock_check.return_value = True\n rmq = rabbitmq_class.RabbitMQPub(self.name, \"xxxxx\")\n\n self.assertFalse(rmq.setup_queue())\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "sub_path": "test/unit/rabbitmq_class/rabbitmqpub_setqueue.py", "file_name": "rabbitmqpub_setqueue.py", "file_ext": "py", "file_size_in_byte": 2113, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "sys.version_info", "line_number": 21, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 30, "usage_type": "call"}, {"api_name": "version.__version__", "line_number": 34, "usage_type": "attribute"}, {"api_name": "unittest.TestCase", "line_number": 37, "usage_type": "attribute"}, {"api_name": "rabbitmq_class.RabbitMQPub", "line_number": 90, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 68, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 69, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 70, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 71, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 72, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "537635849", "text": "#!/usr/bin/python3\n\n# Builds JSON files from CSV files\n#\n# The CSV files need to be placed in a device subfolder.\n# Devices subfolder are labeled with a shorthand that is described in detail in the device.json file.\n# The device folder\n\nimport os\nimport glob\nimport csv\nimport json\n\ncsv_data = {}\n\n\ndef files_in_folder(folder):\n \"\"\"Returns a list of files in the folder and all\n its subfolders recursively. 
The folder can be\n    written with wildcards as with the Unix find command.\n    \"\"\"\n    files = []\n    for f in glob.glob(folder):\n        if os.path.isdir(f):\n            files.extend(files_in_folder(f + os.sep + \"**\"))\n        else:\n            files.append(f)\n    return files\n\n\ndef parse_sig(sig):\n    function_args = sig.split(\"/\")[0]\n    size = int(sig.split(\"/\")[1])\n    time_type = sig.split(\"/\")[2]\n    first_prim = function_args.find(\"Prim\")\n    first_rev = function_args.find(\"Rev\")\n    if first_rev == -1:\n        args_start = first_prim\n    elif first_prim == -1:\n        args_start = first_rev\n    else:\n        args_start = min(first_prim, first_rev)\n    function = function_args[: args_start - 1]\n    args = function_args[args_start:].split(\"_\")\n    data_params = \"\"\n    types = \"\"\n    for i in range(len(args)):\n        if i % 2 == 0:\n            if args[i] == \"Prim\":\n                data_params += \"data,\"\n            else:\n                data_params += \"param,\"\n        else:\n            new_type = args[i]\n            if new_type == \"int1\":\n                new_type = \"array[] int\"\n            types += new_type + \",\"\n    return function, types[:-1], data_params[:-1], size, time_type\n\n\ndef process_file(csv_filename):\n    line_off = 0\n    device_label = csv_filename.split(\"/\")[1]\n    with open(csv_filename) as f:\n        # google benchmark writes some non-csv data at beginning\n        for line in iter(f.readline, \"\"):\n            if line.startswith(\"name,iterations\"):\n                f.seek(f.tell() - len(line) - line_off, os.SEEK_SET)\n                break\n            line_off = -1\n        data = csv.reader(f)\n        header_read = False\n        for i in data:\n            if not header_read:\n                header_read = True\n                continue\n            function, types, data_params, s, t = parse_sig(i[0])\n            if not (function in csv_data):\n                csv_data[function] = {}\n            function_data = csv_data[function]\n            if not (types in function_data):\n                function_data[types] = {}\n            sig_types_data = function_data[types]\n            if not (data_params in sig_types_data):\n                sig_types_data[data_params] = {}\n            sig_data = sig_types_data[data_params]\n            if not (device_label in sig_data):\n                sig_data[device_label] = {\"mean\": [[], []], \"stddev\": [[], []]}\n            sig_device_data = sig_data[device_label]\n            if t == \"manual_time_mean\":\n                sig_device_data[\"mean\"][0].append(s)\n                sig_device_data[\"mean\"][1].append(float(i[2]))\n            if t == \"manual_time_stddev\":\n                sig_device_data[\"stddev\"][0].append(s)\n                sig_device_data[\"stddev\"][1].append(float(i[2]))\n\n\nfor f in files_in_folder(\"csv\"):\n    process_file(f)\n\nfor f in csv_data:\n    with open(\"json/\" + f + \".json\", \"w\") as fp:\n        json.dump(csv_data[f], fp, indent=4, sort_keys=True)\n", "sub_path": "scripts/csv_to_json.py", "file_name": "csv_to_json.py", "file_ext": "py", "file_size_in_byte": 3365, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "glob.glob", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.SEEK_SET", "line_number": 68, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 71, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 103, "usage_type": "call"}]} +{"seq_id": "206833285", "text": "#This file extracts the publication year in each study. 
The years are saved in the file YearsPublications.txt.\r\n# Script created in Python V3.8\r\nimport urllib3\r\nimport codecs\r\nimport json\r\nimport time\r\ncountpapers=0 #Variable to count papers\r\n#Block to open DOIs\r\nf=open('DOIs.txt','r') #Open the DOI.txt file; if it does not exist, create a file with the DOIs\r\nDOIS=f.readlines()\r\nf.close()\r\n#Block for CrossRef Query\r\nhttp = urllib3.PoolManager()\r\ndest = codecs.open('YearsPublications.txt',\"w\") #txt file with years\r\n#This file will be used in order to plot the information\r\n\r\nfor p in DOIS:\r\n    try:\r\n        countpapers=countpapers+1\r\n        r = http.request('GET', 'https://api.crossref.org/works/'+p)\r\n        data= json.loads(r.data.decode('utf-8'))\r\n        print (str(countpapers)+\":\"+str(data))\r\n        D = (data['message']['published-print']) #Get data for years. Data are encoded in UTF-8 through JSON\r\n        Year=D['date-parts'][0]\r\n        dest.write(str(Year[0]))\r\n        dest.write(\"\\n\")\r\n        print (Year[0])\r\n        time.sleep(10) #Ten seconds between requests\r\n    except:\r\n        time.sleep(10) # Ten seconds between requests\r\n        dest.write(\"Error\")\r\n        print(\"Error\")\r\n        dest.write(\"\\n\")\r\n\r\ndest.close() #Close file of publications by years\r\n", "sub_path": "Scripts/ExtractYears.py", "file_name": "ExtractYears.py", "file_ext": "py", "file_size_in_byte": 1280, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "urllib3.PoolManager", "line_number": 13, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 14, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 21, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 28, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "483291852", "text": "import os\nfrom Bio.PDB import PDBParser, Residue, Polypeptide\nfrom Bio import PDB\n\n\nclass Protein(object):\n\t\"\"\" \n\tThis is a class that obtains protein sequence \n\tand structure information \n \n\tAttributes: \n\t\td_sequence (dict of {int: str}): connects PDB \n\t\t\tresidue number with its amino acid type\n\t\"\"\"\n\n\tdef __init__(self, pdb_path):\n\t\tself.structure = PDBParser().get_structure(\"\", pdb_path)\n\t\tself.residues = []\n\t\tself.d_sequence = {}\n\n\t\tself.parse_structure()\n\n\tdef parse_structure(self):\n\t\tfor residue in self.structure.get_residues():\n\t\t\tif PDB.is_aa(residue, standard=True):\t#only consider standard 20 residues\n\t\t\t\tres = residue.id[1]\n\t\t\t\tif res not in self.residues:\t#don't double-count mutated residues\t(ex. 
1ORC)\t\n\t\t\t\t\tself.residues.append(res)\n\t\t\t\t\tself.d_sequence[res] = Polypeptide.three_to_one(Residue.Residue.get_resname(residue))\n\n\tdef get_residues_sequence(self):\n\t\treturn self.residues, self.d_sequence\n", "sub_path": "utlts/pdb_info.py", "file_name": "pdb_info.py", "file_ext": "py", "file_size_in_byte": 916, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "Bio.PDB.PDBParser", "line_number": 17, "usage_type": "call"}, {"api_name": "Bio.PDB.is_aa", "line_number": 25, "usage_type": "call"}, {"api_name": "Bio.PDB", "line_number": 25, "usage_type": "name"}, {"api_name": "Bio.PDB.Polypeptide.three_to_one", "line_number": 29, "usage_type": "call"}, {"api_name": "Bio.PDB.Polypeptide", "line_number": 29, "usage_type": "name"}, {"api_name": "Bio.PDB.Residue.Residue.get_resname", "line_number": 29, "usage_type": "call"}, {"api_name": "Bio.PDB.Residue.Residue", "line_number": 29, "usage_type": "attribute"}, {"api_name": "Bio.PDB.Residue", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "595995898", "text": "from dataclasses import dataclass\n\nfrom expungeservice.models.charge import ChargeType\nfrom expungeservice.models.charge import ChargeUtil\nfrom expungeservice.models.expungement_result import TypeEligibility, EligibilityStatus\n\n\n@dataclass(frozen=True)\nclass MisdemeanorClassBC(ChargeType):\n type_name: str = \"Misdemeanor Class B or C\"\n expungement_rules: str = \"\"\"Convictions for misdemeanors are generally eligible under ORS 137.225(1)(b).\nExceptions include convictions related to sex, child and elder abuse, and driving, including DUII.\nDismissals for misdemeanors are generally eligible under ORS 137.225(1)(b). Exceptions include cases dismissed due to successful completion of DUII diversion.\"\"\"\n severity_level: str = \"Misdemeanor Class B\" # Might be technically inaccurate; but the time analyzer needs this.\n\n def type_eligibility(self, disposition):\n if ChargeUtil.dismissed(disposition):\n raise ValueError(\"Dismissed criminal charges should have been caught by another class.\")\n elif ChargeUtil.convicted(disposition):\n return TypeEligibility(EligibilityStatus.ELIGIBLE, reason=\"Eligible under 137.225(1)(b)\")\n else:\n return TypeEligibility(\n EligibilityStatus.ELIGIBLE,\n reason=\"Always eligible under 137.225(1)(b) for convictions, or 137.225(1)(d) for dismissals\",\n )\n", "sub_path": "src/backend/expungeservice/models/charge_types/misdemeanor_class_bc.py", "file_name": "misdemeanor_class_bc.py", "file_ext": "py", "file_size_in_byte": 1389, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "expungeservice.models.charge.ChargeType", "line_number": 9, "usage_type": "name"}, {"api_name": "expungeservice.models.charge.ChargeUtil.dismissed", "line_number": 17, "usage_type": "call"}, {"api_name": "expungeservice.models.charge.ChargeUtil", "line_number": 17, "usage_type": "name"}, {"api_name": "expungeservice.models.charge.ChargeUtil.convicted", "line_number": 19, "usage_type": "call"}, {"api_name": "expungeservice.models.charge.ChargeUtil", "line_number": 19, "usage_type": "name"}, {"api_name": "expungeservice.models.expungement_result.TypeEligibility", "line_number": 20, "usage_type": "call"}, {"api_name": "expungeservice.models.expungement_result.EligibilityStatus.ELIGIBLE", "line_number": 20, "usage_type": "attribute"}, {"api_name": "expungeservice.models.expungement_result.EligibilityStatus", 
"line_number": 20, "usage_type": "name"}, {"api_name": "expungeservice.models.expungement_result.TypeEligibility", "line_number": 22, "usage_type": "call"}, {"api_name": "expungeservice.models.expungement_result.EligibilityStatus.ELIGIBLE", "line_number": 23, "usage_type": "attribute"}, {"api_name": "expungeservice.models.expungement_result.EligibilityStatus", "line_number": 23, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "564891201", "text": "# -*- coding: utf-8 -*-\n# for mac\nimport matplotlib\nmatplotlib.use('TkAgg')\n\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score\nfrom sklearn.metrics import auc\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.decomposition import PCA\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.preprocessing import StandardScaler\nfrom ml_algorithms.ml_algorithm_interface import AlgorithmInterface\n\n\nclass LogisticRegressionAlgorithm(AlgorithmInterface):\n def __init__(self):\n super(LogisticRegressionAlgorithm, self).__init__()\n\n def feature_engineering(self):\n self.convert_symbolic_feature_into_continuous()\n\n def train_phase(self):\n pipe_logistic_regression = Pipeline([('sc', StandardScaler()),\n ('pca', PCA(n_components=2)),\n ('clf', LogisticRegression(random_state=1))\n ])\n\n # param_range = [10 ** c for c in range(-4, 4)]\n param_range = [0.0001, 0.001]\n hyper_parameter_grid = {\n 'clf__C': param_range,\n 'clf__gamma': param_range,\n 'clf__kernel': ['linear', 'rbf']\n }\n\n # Set up the random search with 4-fold cross validation\n self.classifier = RandomizedSearchCV(estimator=pipe_logistic_regression,\n param_distributions=hyper_parameter_grid,\n cv=4, n_iter=5,\n scoring='roc_auc',\n n_jobs=-1, verbose=2,\n return_train_score=True,\n random_state=42)\n\n # Fit on the training data\n self.classifier.fit(self.train_data, self.train_label)\n print(\"123\")\n\n def test_phase(self):\n y_predict = self.classifier.predict(self.test_data)\n print(\"accuracy: %f\" % accuracy_score(self.test_label, y_predict))\n print(\"precision: %f\" % precision_score(self.test_label, y_predict, average=\"macro\"))\n print(\"recall: %f\" % recall_score(self.test_label, y_predict, average=\"macro\"))\n\n fpr, tpr, thresholds = metrics.roc_curve(y_predict, self.test_label)\n plt.plot(fpr, tpr, marker='o')\n plt.show()\n auc_score = auc(fpr, tpr)\n print(\"AUC: %f\" % auc_score)\n", "sub_path": "ml_algorithms/logistic_regression.py", "file_name": "logistic_regression.py", "file_ext": "py", "file_size_in_byte": 2570, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "matplotlib.use", "line_number": 4, "usage_type": "call"}, {"api_name": "ml_algorithms.ml_algorithm_interface.AlgorithmInterface", "line_number": 18, "usage_type": "name"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 26, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 26, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 27, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 28, "usage_type": "call"}, {"api_name": "sklearn.model_selection.RandomizedSearchCV", "line_number": 40, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 54, 
"usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 55, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 56, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_curve", "line_number": 58, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "sklearn.metrics.auc", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "611472083", "text": "from django.shortcuts import render\nfrom urllib.parse import quote\nfrom bs4 import BeautifulSoup\nimport requests\n\nusers = [\n r\"闲闲小可爱\",\n r\"基拉的左轮\",\n r\"云天挽歌\",\n r\"我爱两仪式\",\n r\"菠萝快乐车\",\n r\"丨ELUNE丨\",\n r\"技不如人,甘拜下风。\",\n r\"相思寄于山海\",\n r\"忧伤暗火\",\n r\"喵了個咪的\",\n r\"三谷加奈惠\",\n r\"珈百璃小惡魔\",\n r\"不愿透露姓名的青某\",\n #r\"Nirvana_Y\"\n]\n\ndef index(request):\n latest_stats_list = []\n for user in users:\n stat = {}\n userId = quote(user)\n # print(userId)\n \n # 拉数据\n stat = {}\n url = \"http://wotbox.ouj.com/wotbox/index.php?r=default%2Findex&pn={id}\".format(id=userId)\n resp = requests.get(url)\n webContent = resp.content\n soup = BeautifulSoup(webContent, 'html.parser')\n\n stat[\"id\"] = user\n stat[\"power\"] = soup.find(class_='power fl').span.get_text()\n stat[\"win_rate\"] = soup.find(class_='title win-rate-1k').next_sibling.next_sibling.get_text()\n stat[\"win_rate_c\"] = 1 if int(soup.find(class_='title win-rate-1k').next_sibling.next_sibling.get_text().strip('%')) >= 50 else 0 \n stat[\"total\"] = soup.find(class_='total').get_text()[2:]\n stat[\"win\"] = soup.findAll(class_=\"win\")[5].get_text()[2:]\n stat[\"fail\"] = soup.find(class_='fail').get_text()[2:]\n stat[\"damage\"] = soup.findAll(class_='num')[6].get_text()\n stat[\"exp\"] = soup.findAll(class_='num')[7].get_text()\n stat[\"destroy\"] = soup.findAll(class_='num')[8].get_text()\n stat[\"discover\"] = soup.findAll(class_='num')[11].get_text()\n stat[\"level\"] = soup.find(class_='title avg-lv-1k').next_sibling.next_sibling.get_text()\n stat[\"hit_rate\"] = soup.find(class_='title hit-rate-1k').next_sibling.next_sibling.get_text()\n for i in range(5):\n stat[\"p{n}c\".format(n=i+1)] = 1 if soup.findAll(class_='recent-list__right')[i].td.get_text() == '胜利' else 0 \n stat[\"p{n}\".format(n=i+1)] = soup.findAll(class_='recent-list__right')[i].td.next_sibling.next_sibling.get_text()\n print(stat) \n latest_stats_list.append(stat)\n context = {\n 'latest_stats_list': latest_stats_list,\n }\n return render(request, 'stats/index.html', context)\n", "sub_path": "stats/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2354, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "urllib.parse.quote", "line_number": 27, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 33, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 35, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "379975299", "text": "import argparse\nimport os\n\nimport numpy as np\nimport pandas as pd\nimport torch \nfrom torch import nn\nimport torch.nn as nn\nimport torch.nn.functional as 
F\nimport torch.optim as optim\nfrom torchvision import models\nimport torchvision.datasets as dsets\nimport torch.utils.data as Data\nimport random\nfrom torch.autograd import Variable\nfrom torch import randn\nfrom torch import randint\nfrom model import Generator, Discriminator\n\n#os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ncuda = True if torch.cuda.is_available() else False\n\n#parse arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-e\",\"--epochs\", type=int, default=200, help=\"number of epochs\")\nparser.add_argument(\"-b\",\"--batch_size\", type=int, default=32, help=\"batch size\")\nparser.add_argument(\"-mn\",\"--model_name\", type=str, default=\"model\", help=\"model name for saving\")\nparser.add_argument(\"-lr\",\"--learning_rate\", type=float, default=0.0002, help=\"learning rate\")\nopt = parser.parse_args()\nprint(opt)\n\nmodel_folder_path = \"models/\"+opt.model_name+\"/\"\nos.makedirs(model_folder_path,exist_ok=True)\n\n# parameters\nEPOCH = opt.epochs\nBATCH_SIZE = opt.batch_size\nLR = opt.learning_rate\nD_UPD_NUM = 1\nG_UPD_NUM = 1\n\n#######################################################################\nFloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\nLongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor\nadversarial_loss = torch.nn.BCELoss().cuda()\nauxiliary_loss = torch.nn.MSELoss().cuda()\n\ndef update(iterator, generator, discriminator, optimizer_D, optimizer_G):\n \n for i, batch in enumerate(iterator):\n # Configure input\n real_imgs = batch[0].to(device)\n shuf_imgs = batch[1].to(device)\n text = batch[2].to(device)\n\n real_imgs = real_imgs.permute(0,3,1,2)\n shuf_imgs = shuf_imgs.permute(0,3,1,2)\n batch_size = real_imgs.shape[0]\n\n valid = torch.ones(batch_size,1, requires_grad=False).to(device)\n fake = torch.zeros(batch_size,1, requires_grad=False).to(device)\n # -----------------\n # Train Generator\n # -----------------\n for _ in range(3):\n optimizer_G.zero_grad()\n\n # Sample noise as generator input\n noise = randn(batch_size, 100).to(device)\n #noise = Variable(FloatTensor(np.random.normal(0,1,(batch_size, 100))))\n #gen_text = Variable(LongTensor(np.random.randint(0,22,batch_size)))\n gen_text_1 = torch.zeros(batch_size,12,requires_grad=False).to(device)\n gen_text_2 = torch.zeros(batch_size,10,requires_grad=False).to(device)\n one_pos_for_gtext_1 = randint(0,12,(batch_size,))\n one_pos_for_gtext_2 = randint(0,10,(batch_size,))\n for idx_1,text_1 in enumerate(gen_text_1): text_1[one_pos_for_gtext_1[idx_1]] = 1\n for idx_2,text_2 in enumerate(gen_text_2): text_2[one_pos_for_gtext_2[idx_2]] = 1\n gen_text = torch.cat((gen_text_1,gen_text_2),1)\n \n # Generate a batch of images\n gen_imgs = generator(noise, gen_text)\n\n # Loss measures generator's ability to fool the discriminator\n validity, pred_label = discriminator(gen_imgs, gen_text)\n \n pred_label = Variable(pred_label)\n #gen_text = Variable(gen_text)\n #gen_text = gen_text.long()\n g_loss_1 = adversarial_loss(validity, valid)\n #print(gen_text)\n #g_loss_2 = auxiliary_loss(pred_label, torch.argmax(gen_text,1))\n g_loss_2 = auxiliary_loss(pred_label, gen_text)\n \n g_loss = 0.5*(g_loss_1+g_loss_2)\n g_loss.backward()\n optimizer_G.step()\n\n # ---------------------\n # Train Discriminator\n # ---------------------\n for _ in range(1):\n optimizer_D.zero_grad()\n\n # Measure discriminator's ability to classify real from generated samples\n #loss for real images \n \n 
real_pred, real_aux = discriminator(real_imgs, text)\n d_real_loss_1 = adversarial_loss(real_pred, valid)\n d_real_loss_2 = auxiliary_loss(real_aux, text)\n d_real_loss = 0.5*(d_real_loss_1 + d_real_loss_2)\n \n #loss for fake image\n fake_pred, fake_aux = discriminator(gen_imgs.detach(), text)\n d_fake_loss_1 = adversarial_loss(fake_pred, fake) \n d_fake_loss_2 = auxiliary_loss(fake_aux, text)\n d_fake_loss = 0.5*(d_fake_loss_1 + d_fake_loss_2)\n \n #total d_loss\n d_loss = (d_real_loss + d_fake_loss) / 2\n\n d_loss.backward()\n optimizer_D.step()\n print(\n \" [Batch %d/%d] [D loss: %f] [G loss: %f]\"\n % ( i, len(iterator), d_loss.item(), g_loss.item()),\n end = '\\r'\n )\n\n\n#######################################################################\n\ntrain_df = pd.read_pickle('../train_img.pkl')\nreal_img_arr = np.concatenate(train_df.values[0],axis=0)[:]\nshuf_img_arr = real_img_arr\nnp.random.shuffle(shuf_img_arr)\n\n\n# ITER_NUM = len(real_img) // BATCH_SIZE\n# print(len(real_img_arr))\n# print(real_img_arr[0])\n# print(real_img_arr[0].shape)\n\ntags_df = pd.read_csv('../extra_data/tags.csv')\ntags = tags_df['attr']\ntags_list = []\ntags_num_list = []\nfor tag in tags:\n tag = tag[1]\n\nfor tag in tags:\n tags_list.append(tag.split())\n\n# ‘color hair’\n# 'orange hair', 0 ,'white hair', 1, 'aqua hair', 2, 'gray hair', 3\n# 'green hair', 4, 'red hair', 5, 'purple hair', 6, 'pink hair', 7\n# 'blue hair', 8, 'black hair', 9, 'brown hair', 10, 'blonde hair', 11\n# ‘color eyes’\n# 'black eyes', 12, 'orange eyes', 13\n# 'pink eyes', 14, 'yellow eyes', 15, 'aqua eyes', 16, 'purple eyes', 17\n# 'green eyes', 18, 'brown eyes', 19, 'red eyes', 20, 'blue eyes', 21\n# two hot \n\nfor tag_list in tags_list:\n if(tag_list[0] == 'orange'): num = 0\n elif(tag_list[0] == 'white'): num = 1\n elif (tag_list[0] == 'aqua'): num = 2\n elif (tag_list[0] == 'gray'): num = 3\n elif (tag_list[0] == 'green'): num = 4\n elif (tag_list[0] == 'red'): num = 5\n elif (tag_list[0] == 'purple'): num = 6\n elif (tag_list[0] == 'pink'): num = 7\n elif (tag_list[0] == 'blue'): num = 8\n elif (tag_list[0] == 'black'): num = 9\n elif (tag_list[0] == 'brown'): num = 10\n elif (tag_list[0] == 'blonde'): num = 11\n\n if(tag_list[2] == 'black'): num_2 = 12\n elif(tag_list[2] == 'orange'): num_2 = 13\n elif(tag_list[2] == 'pink'): num_2 = 14\n elif(tag_list[2] == 'yellow'): num_2 = 15\n elif(tag_list[2] == 'aqua'): num_2 = 16\n elif(tag_list[2] == 'purple'): num_2 = 17\n elif(tag_list[2] == 'green'): num_2 = 18\n elif(tag_list[2] == 'brown'): num_2 = 19\n elif(tag_list[2] == 'red'): num_2 = 20\n elif(tag_list[2] == 'blue'): num_2 = 21\n\n num_list = np.zeros(22)\n num_list[num] = 1\n num_list[num_2] = 1\n tags_num_list.append(num_list)\n\nreal_img_tensor = torch.from_numpy(real_img_arr).float()\nshuf_img_tensor = torch.from_numpy(shuf_img_arr).float()\ntags_tensor = torch.Tensor(tags_num_list)\ntorch_dataset = Data.TensorDataset(real_img_tensor, shuf_img_tensor, tags_tensor)\n\nloader = Data.DataLoader( \n dataset = torch_dataset,\n batch_size = BATCH_SIZE,\n shuffle = True,\n num_workers = 2 \n)\n\nG = Generator().cuda()\nD = Discriminator().cuda()\n\noptimizer_g = optim.Adam(G.parameters(),lr= LR*0.5, betas = (0.5, 0.99))\noptimizer_d = optim.Adam(D.parameters(),lr= LR, betas = (0.5, 0.99))\n\n\nfor i in range(EPOCH):\n #train\n print(\"start training epoch\"+str(i))\n \n update(loader, G, D,optimizer_d, optimizer_g)\n #for j in range(D_UPD_NUM): \n # update_d(loader, G, D, optimizer_d)\n #for j in range(G_UPD_NUM):\n # 
update_g(BATCH_SIZE, ITER_NUM, G, D, optimizer_g)\n if(i%5==4):\n torch.save(G.state_dict(), model_folder_path+'G'+ str(i)+'.pkl')\n torch.save(D.state_dict(), model_folder_path+'D'+ str(i)+'.pkl')\n", "sub_path": "hw3/hw3-2 3-3/hw3_2/acgan/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 7932, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "torch.device", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 21, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 22, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 25, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 44, "usage_type": "attribute"}, {"api_name": "torch.FloatTensor", "line_number": 44, "usage_type": "attribute"}, {"api_name": "torch.cuda", "line_number": 45, "usage_type": "attribute"}, {"api_name": "torch.LongTensor", "line_number": 45, "usage_type": "attribute"}, {"api_name": "torch.nn.BCELoss", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 46, "usage_type": "attribute"}, {"api_name": "torch.nn.MSELoss", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 47, "usage_type": "attribute"}, {"api_name": "torch.ones", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.randint", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.randint", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 87, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 136, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 189, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 194, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 195, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 196, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 197, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 197, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 199, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 199, "usage_type": "name"}, {"api_name": "model.Generator", "line_number": 206, "usage_type": "call"}, {"api_name": "model.Discriminator", "line_number": 207, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 209, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 209, "usage_type": "name"}, {"api_name": 
"torch.optim.Adam", "line_number": 210, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 210, "usage_type": "name"}, {"api_name": "torch.save", "line_number": 223, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 224, "usage_type": "call"}]} +{"seq_id": "156055738", "text": "from lxml.html import fromstring\nimport requests\nfrom itertools import cycle\nimport traceback\nimport urllib3\n\n\ndef get_proxies():\n url = 'https://hidemy.name/api/proxylist.txt?country=US&maxtime=1700&type=hs&out=plain&lang=en&utf&code=709444465162232'\n response = requests.get(url)\n parser = response.text\n print(parser)\n # proxies = set()\n # for i in parser:\n # proxy = i\n # print(i)\n # proxies.add(proxy)\n # return proxies\n return parser\n\n\nif __name__ == \"__main__\":\n thing = get_proxies()\n proxies = thing.split()\n print(proxies)\n # If you are copy pasting proxy ips, put in the list below\n # proxies = ['121.129.127.209:80', '124.41.215.238:45169', '185.93.3.123:8080', '194.182.64.67:3128', '106.0.38.174:8080', '163.172.175.210:3128', '13.92.196.150:8080']\n # proxies = get_proxies()\n\n proxy_pool = cycle(proxies)\n\n url = 'https://httpbin.org/ip'\n for i in range(1, len(proxies) - 1):\n # Get a proxy from the pool\n proxy = next(proxy_pool)\n print(\"Request #%d\" % i)\n try:\n response = requests.get(\n url, proxies={\"https\": \"http://\" + proxy, \"http\": \"http://\" + proxy})\n print(response.json())\n except:\n # Most free proxies will often get connection errors. You will have retry the entire request using another proxy to work.\n # We will just skip retries as its beyond the scope of this tutorial and we are only downloading a single url\n print(\"Skipping. Connnection error\")\n", "sub_path": "read-file.py", "file_name": "read-file.py", "file_ext": "py", "file_size_in_byte": 1557, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "requests.get", "line_number": 10, "usage_type": "call"}, {"api_name": "itertools.cycle", "line_number": 30, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "48147273", "text": "# -*- coding: utf-8 -*-\nimport urwid, sys\nfrom programs import Program\n\n\ndef get_status():\n status = \"Status: \"\n if Program.running:\n e = Program.running.error()\n if e:\n status += \"ERROR\"\n else:\n status += \"RUNNING\"\n status += \" (\" + Program.running.__class__.__name__ + \") \"\n if e:\n status += \"\\n\" + str(e) + \" (\" + e.__class__.__name__ + \") \"\n else:\n status += \"OFF\"\n\n return status\n\nstatus = urwid.Text(get_status())\n\ndef menu(choices):\n body = [urwid.Divider(\"-\"), urwid.Text(\"ANGEZEIGE\", align='center'), urwid.Divider(\"-\"), status, urwid.Divider(\"-\"), urwid.Text(\"Choose Program:\")]\n for choice in choices:\n button = urwid.Button(choice)\n urwid.connect_signal(button, 'click', item_chosen, user_args = [choice])\n body.append(urwid.AttrMap(button, None, focus_map='reversed'))\n body.append(urwid.Divider(\"-\"))\n button = urwid.Button(\"EXIT\")\n urwid.connect_signal(button, 'click', exit_application)\n body.append(urwid.AttrMap(button, None, focus_map='reversed'))\n \n return urwid.ListBox(urwid.SimpleFocusListWalker(body))\n\ndef item_chosen(choice, button):\n body = [urwid.Divider(\"-\"), urwid.Text(choice, align='center'), urwid.Divider(\"-\"), urwid.Text(\"Parameters:\")]\n\n params = {}\n for p, v in Program.getPromotedPrograms()[choice].getParams().items():\n 
#body.append(urwid.Text())\n edit = urwid.Edit(caption = u\"▸ \" + p.title() + \": \", edit_text = v)\n body.append(urwid.AttrMap(edit, None, focus_map='reversed'))\n #body.append(urwid.Divider())\n params[p] = edit\n\n body.append(urwid.Divider(\"-\"))\n\n ok = urwid.Button(u'Ok')\n back = urwid.Button(u'Back')\n \n urwid.connect_signal(ok, 'click', start_program, user_args = [choice, params])\n urwid.connect_signal(back, 'click', show_menu)\n \n tOk = urwid.AttrMap(ok, None, focus_map='reversed')\n \n body.append(tOk)\n body.append(urwid.AttrMap(back, None, focus_map='reversed'))\n\n mainWidget.original_widget = urwid.Filler(urwid.Pile(body, focus_item=tOk))\n\ndef start_program(choice, params, button):\n cParams = {}\n for p in params:\n cParams[p] = params[p].get_edit_text()\n\n if Program.running:\n Program.running.stop()\n Program.running.join()\n\n p = Program.getPromotedPrograms()[choice](**cParams)\n p.start()\n show_menu()\n\ndef exit_application(button):\n raise urwid.ExitMainLoop()\n\ndef show_menu(button = None):\n mainWidget.original_widget = listMenu\n \ndef get_info(mainLoop, data):\n status.set_text(get_status())\n mainLoop.set_alarm_in(1, get_info)\n\ndef choose():\n top = urwid.Overlay(mainWidget, urwid.SolidFill(u'\\N{MEDIUM SHADE}'),\n align='center', width=('relative', 60),\n valign='middle', height=('relative', 60),\n min_width=20, min_height=9)\n show_menu()\n mainLoop = urwid.MainLoop(top, palette=[('reversed', 'standout', '')])\n mainLoop.set_alarm_in(0, get_info)\n mainLoop.run()\n\n\nlistMenu = menu(Program.promotedPrograms.keys())\nmainWidget = urwid.Padding(None, left=1, right=1)\n", "sub_path": "chooser.py", "file_name": "chooser.py", "file_ext": "py", "file_size_in_byte": 3134, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "programs.Program.running", "line_number": 8, "usage_type": "attribute"}, {"api_name": "programs.Program", "line_number": 8, "usage_type": "name"}, {"api_name": "programs.Program.running.error", "line_number": 9, "usage_type": "call"}, {"api_name": "programs.Program.running", "line_number": 9, "usage_type": "attribute"}, {"api_name": "programs.Program", "line_number": 9, "usage_type": "name"}, {"api_name": "programs.Program.running", "line_number": 14, "usage_type": "attribute"}, {"api_name": "programs.Program", "line_number": 14, "usage_type": "name"}, {"api_name": "urwid.Text", "line_number": 22, "usage_type": "call"}, {"api_name": "urwid.Divider", "line_number": 25, "usage_type": "call"}, {"api_name": "urwid.Text", "line_number": 25, "usage_type": "call"}, {"api_name": "urwid.Button", "line_number": 27, "usage_type": "call"}, {"api_name": "urwid.connect_signal", "line_number": 28, "usage_type": "call"}, {"api_name": "urwid.AttrMap", "line_number": 29, "usage_type": "call"}, {"api_name": "urwid.Divider", "line_number": 30, "usage_type": "call"}, {"api_name": "urwid.Button", "line_number": 31, "usage_type": "call"}, {"api_name": "urwid.connect_signal", "line_number": 32, "usage_type": "call"}, {"api_name": "urwid.AttrMap", "line_number": 33, "usage_type": "call"}, {"api_name": "urwid.ListBox", "line_number": 35, "usage_type": "call"}, {"api_name": "urwid.SimpleFocusListWalker", "line_number": 35, "usage_type": "call"}, {"api_name": "urwid.Divider", "line_number": 38, "usage_type": "call"}, {"api_name": "urwid.Text", "line_number": 38, "usage_type": "call"}, {"api_name": "programs.Program.getPromotedPrograms", "line_number": 41, "usage_type": "call"}, {"api_name": 
"programs.Program", "line_number": 41, "usage_type": "name"}, {"api_name": "urwid.Edit", "line_number": 43, "usage_type": "call"}, {"api_name": "urwid.AttrMap", "line_number": 44, "usage_type": "call"}, {"api_name": "urwid.Divider", "line_number": 48, "usage_type": "call"}, {"api_name": "urwid.Button", "line_number": 50, "usage_type": "call"}, {"api_name": "urwid.Button", "line_number": 51, "usage_type": "call"}, {"api_name": "urwid.connect_signal", "line_number": 53, "usage_type": "call"}, {"api_name": "urwid.connect_signal", "line_number": 54, "usage_type": "call"}, {"api_name": "urwid.AttrMap", "line_number": 56, "usage_type": "call"}, {"api_name": "urwid.AttrMap", "line_number": 59, "usage_type": "call"}, {"api_name": "urwid.Filler", "line_number": 61, "usage_type": "call"}, {"api_name": "urwid.Pile", "line_number": 61, "usage_type": "call"}, {"api_name": "programs.Program.running", "line_number": 68, "usage_type": "attribute"}, {"api_name": "programs.Program", "line_number": 68, "usage_type": "name"}, {"api_name": "programs.Program.running.stop", "line_number": 69, "usage_type": "call"}, {"api_name": "programs.Program.running", "line_number": 69, "usage_type": "attribute"}, {"api_name": "programs.Program", "line_number": 69, "usage_type": "name"}, {"api_name": "programs.Program.running.join", "line_number": 70, "usage_type": "call"}, {"api_name": "programs.Program.running", "line_number": 70, "usage_type": "attribute"}, {"api_name": "programs.Program", "line_number": 70, "usage_type": "name"}, {"api_name": "programs.Program.getPromotedPrograms", "line_number": 72, "usage_type": "call"}, {"api_name": "programs.Program", "line_number": 72, "usage_type": "name"}, {"api_name": "urwid.ExitMainLoop", "line_number": 77, "usage_type": "call"}, {"api_name": "urwid.Overlay", "line_number": 87, "usage_type": "call"}, {"api_name": "urwid.SolidFill", "line_number": 87, "usage_type": "call"}, {"api_name": "urwid.MainLoop", "line_number": 92, "usage_type": "call"}, {"api_name": "programs.Program.promotedPrograms.keys", "line_number": 97, "usage_type": "call"}, {"api_name": "programs.Program.promotedPrograms", "line_number": 97, "usage_type": "attribute"}, {"api_name": "programs.Program", "line_number": 97, "usage_type": "name"}, {"api_name": "urwid.Padding", "line_number": 98, "usage_type": "call"}]} +{"seq_id": "138853865", "text": "#!/usr/bin/env python\n\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error\n\nfrom scripts.supervised_model_utils import score_model, load_model, prepare_data_for_model, load_model_params, \\\n add_supervised_target\n\n\ndef find_production_dates(model_params):\n sup_df = prepare_data_for_model(country=model_params[\"country\"],\n mode=\"production\",\n resampling_method=model_params[\"resampling_method\"],\n variables=model_params[\"variables\"],\n hm_days=model_params[\"hm_days\"],\n functions=model_params[\"functions\"],\n day_windows=model_params[\"day_windows\"],\n verbose=0)\n\n start = sup_df.index.min()\n end = sup_df.index.max()\n\n return start, end, sup_df\n\n\ndef evaluate_production_performance(df, hm_days):\n df = add_supervised_target(df, hm_days=hm_days)\n\n df = df.iloc[:-hm_days]\n\n return df[\"target\"]\n\n\ndef plot_production_results(true, preds):\n fig, ax = plt.subplots(1, 2, figsize=(20, 6))\n ax[0].set_title(\"true vs pred, production data\")\n ax[0].plot(preds, label=\"preds\")\n ax[0].plot(true, label=\"true\")\n 
ax[0].set_ylabel(\"revenue\")\n ax[0].legend()\n\n ax[1].set_title(\"absolute error, production data\")\n ax[1].plot(abs(preds - true))\n ax[1].set_ylabel(\"error\")\n plt.show()\n\n\ndef compute_production_error(true, preds):\n prod_mae = mean_absolute_error(true, preds)\n prod_rmse = mean_squared_error(true, preds, squared=False)\n\n prod_error = np.mean([prod_mae, prod_rmse])\n prod_error = round(prod_error, 2)\n\n return prod_error\n\n\ndef monitor(country, hm_days):\n model, model_name = load_model(country_name=country)\n model_params = load_model_params(model_name)\n\n start, end, sup_df = find_production_dates(model_params)\n\n starting_dates = list(pd.date_range(start, end))\n\n prod_preds = score_model(starting_dates, model_name, test=False, mode=\"production\")\n\n prod_true = evaluate_production_performance(sup_df, hm_days)\n cleaned_prod_preds = pd.Series(prod_preds[:-hm_days],\n index=prod_true.index).apply(lambda x: x[0])\n\n prod_error = compute_production_error(prod_true, cleaned_prod_preds)\n\n print(f\"Error on production data: {prod_error}\")\n\n plot_production_results(prod_true, cleaned_prod_preds)\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description='monitor production data')\n parser.add_argument('-c', '--country', required=True, help='name of the country or None')\n parser.add_argument('-d', '--hm_days', default=30, type=int, help='how many days in the future to predict')\n\n args = parser.parse_args()\n\n monitor(args.country, args.hm_days)\n", "sub_path": "monitoring.py", "file_name": "monitoring.py", "file_ext": "py", "file_size_in_byte": 2955, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "scripts.supervised_model_utils.prepare_data_for_model", "line_number": 14, "usage_type": "call"}, {"api_name": "scripts.supervised_model_utils.add_supervised_target", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "sklearn.metrics.mean_absolute_error", "line_number": 52, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 55, "usage_type": "call"}, {"api_name": "scripts.supervised_model_utils.load_model", "line_number": 62, "usage_type": "call"}, {"api_name": "scripts.supervised_model_utils.load_model_params", "line_number": 63, "usage_type": "call"}, {"api_name": "pandas.date_range", "line_number": 67, "usage_type": "call"}, {"api_name": "scripts.supervised_model_utils.score_model", "line_number": 69, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 72, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "17282822", "text": "# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom textwrap import dedent\nfrom typing import Iterable, Type\n\nimport pytest\n\nfrom pants.backend.python.dependency_inference.rules import import_rules\nfrom pants.backend.python.goals import lockfile\nfrom pants.backend.python.goals.lockfile import 
GeneratePythonLockfile\nfrom pants.backend.python.subsystems.python_tool_base import LockfileRules, PythonToolBase\nfrom pants.backend.python.target_types import ConsoleScript\nfrom pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints\nfrom pants.core.goals import generate_lockfiles\nfrom pants.core.goals.generate_lockfiles import GenerateLockfilesGoal, GenerateToolLockfileSentinel\nfrom pants.engine.rules import QueryRule\nfrom pants.engine.target import Dependencies, SingleSourceField, Target\nfrom pants.engine.unions import UnionRule\nfrom pants.testutil.rule_runner import RuleRunner\nfrom pants.util.ordered_set import FrozenOrderedSet\n\n\ndef _get_generated_lockfile_sentinel(\n rules: Iterable, subsystem: Type[PythonToolBase]\n) -> Type[GenerateToolLockfileSentinel]:\n \"\"\"Fish the generated lockfile sentinel out of the pool of rules so it can be used in a\n QueryRule.\"\"\"\n return next(\n r\n for r in rules\n if isinstance(r, UnionRule)\n and r.union_base == GenerateToolLockfileSentinel\n and issubclass(r.union_member, GenerateToolLockfileSentinel)\n and r.union_member.resolve_name == subsystem.options_scope\n ).union_member\n\n\nclass FakeToolWithSimpleLocking(PythonToolBase):\n options_scope = \"cowsay\"\n name = \"Cowsay\"\n help = \"A tool to test pants\"\n\n default_version = \"cowsay==5.0\"\n default_main = ConsoleScript(\"cowsay\")\n\n register_interpreter_constraints = True\n default_interpreter_constraints = [\"CPython>=3.7,<4\"]\n\n default_lockfile_resource = (\"\", \"cowsay.lock\")\n lockfile_rules_type = LockfileRules.SIMPLE\n\n\nclass MockSourceField(SingleSourceField):\n pass\n\n\nclass MockDependencies(Dependencies):\n pass\n\n\nclass MockTarget(Target):\n alias = \"tgt\"\n core_fields = (MockSourceField, MockDependencies)\n\n\n@pytest.fixture\ndef rule_runner() -> RuleRunner:\n lockfile_sentinel = _get_generated_lockfile_sentinel(\n FakeToolWithSimpleLocking.rules(), FakeToolWithSimpleLocking\n )\n rule_runner = RuleRunner(\n rules=[\n *lockfile.rules(),\n *generate_lockfiles.rules(),\n *import_rules(),\n *FakeToolWithSimpleLocking.rules(),\n QueryRule(GeneratePythonLockfile, [lockfile_sentinel]),\n ],\n target_types=[MockTarget],\n )\n\n rule_runner.write_files(\n {\"project/example.ext\": \"\", \"project/BUILD\": \"tgt(source='example.ext')\"}\n )\n return rule_runner\n\n\ndef test_simple_python_lockfile(rule_runner):\n \"\"\"Test that the `LockfileType.PEX_SIMPLE` resolved the graph and generates the lockfile.\"\"\"\n result = rule_runner.run_goal_rule(\n GenerateLockfilesGoal,\n args=[\n \"--resolve=cowsay\",\n \"--cowsay-lockfile=aaa.lock\",\n ],\n env_inherit={\"PATH\", \"PYENV_ROOT\", \"HOME\"},\n )\n assert result\n lockfile_content = rule_runner.read_file(\"aaa.lock\")\n assert (\n dedent(\n f\"\"\"\\\n // \"generated_with_requirements\": [\n // \"{FakeToolWithSimpleLocking.default_version}\"\n // ],\n \"\"\"\n )\n in lockfile_content\n )\n\n\ndef test_setup_lockfile(rule_runner) -> None:\n global_constraint = \"CPython<4,>=3.8\"\n\n lockfile_sentinel = _get_generated_lockfile_sentinel(\n FakeToolWithSimpleLocking.rules(), FakeToolWithSimpleLocking\n )\n\n def assert_lockfile_request(\n build_file: str,\n expected_ics: list[str],\n *,\n extra_expected_requirements: list[str] | None = None,\n extra_args: list[str] | None = None,\n ) -> None:\n rule_runner.write_files({\"project/BUILD\": build_file, \"project/f.py\": \"\"})\n rule_runner.set_options(\n [\"--cowsay-lockfile=lockfile.txt\", *(extra_args or [])],\n 
env={\"PANTS_PYTHON_INTERPRETER_CONSTRAINTS\": f\"['{global_constraint}']\"},\n env_inherit={\"PATH\", \"PYENV_ROOT\", \"HOME\"},\n )\n lockfile_request = rule_runner.request(GeneratePythonLockfile, [lockfile_sentinel()])\n assert lockfile_request.interpreter_constraints == InterpreterConstraints(expected_ics)\n assert lockfile_request.requirements == FrozenOrderedSet(\n [\n FakeToolWithSimpleLocking.default_version,\n *FakeToolWithSimpleLocking.default_extra_requirements,\n *(extra_expected_requirements or ()),\n ]\n )\n\n assert_lockfile_request(\n \"python_sources()\", FakeToolWithSimpleLocking.default_interpreter_constraints\n )\n assert_lockfile_request(\"target()\", FakeToolWithSimpleLocking.default_interpreter_constraints)\n # Since the SIMPLE locking mechanism doesn't look at ICs, this will still use tool ICs.\n assert_lockfile_request(\n \"python_sources(interpreter_constraints=['CPython<4,>=3.7'])\",\n FakeToolWithSimpleLocking.default_interpreter_constraints,\n )\n", "sub_path": "src/python/pants/backend/python/util_rules/lockfile_test.py", "file_name": "lockfile_test.py", "file_ext": "py", "file_size_in_byte": 5331, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "typing.Iterable", "line_number": 27, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 27, "usage_type": "name"}, {"api_name": "pants.backend.python.subsystems.python_tool_base.PythonToolBase", "line_number": 27, "usage_type": "name"}, {"api_name": "pants.engine.unions.UnionRule", "line_number": 34, "usage_type": "argument"}, {"api_name": "pants.core.goals.generate_lockfiles.GenerateToolLockfileSentinel", "line_number": 35, "usage_type": "name"}, {"api_name": "pants.core.goals.generate_lockfiles.GenerateToolLockfileSentinel", "line_number": 36, "usage_type": "argument"}, {"api_name": "typing.Type", "line_number": 28, "usage_type": "name"}, {"api_name": "pants.core.goals.generate_lockfiles.GenerateToolLockfileSentinel", "line_number": 28, "usage_type": "name"}, {"api_name": "pants.backend.python.subsystems.python_tool_base.PythonToolBase", "line_number": 41, "usage_type": "name"}, {"api_name": "pants.backend.python.target_types.ConsoleScript", "line_number": 47, "usage_type": "call"}, {"api_name": "pants.backend.python.subsystems.python_tool_base.LockfileRules.SIMPLE", "line_number": 53, "usage_type": "attribute"}, {"api_name": "pants.backend.python.subsystems.python_tool_base.LockfileRules", "line_number": 53, "usage_type": "name"}, {"api_name": "pants.engine.target.SingleSourceField", "line_number": 56, "usage_type": "name"}, {"api_name": "pants.engine.target.Dependencies", "line_number": 60, "usage_type": "name"}, {"api_name": "pants.engine.target.Target", "line_number": 64, "usage_type": "name"}, {"api_name": "pants.testutil.rule_runner.RuleRunner", "line_number": 74, "usage_type": "call"}, {"api_name": "pants.backend.python.goals.lockfile.rules", "line_number": 76, "usage_type": "call"}, {"api_name": "pants.backend.python.goals.lockfile", "line_number": 76, "usage_type": "name"}, {"api_name": "pants.core.goals.generate_lockfiles.rules", "line_number": 77, "usage_type": "call"}, {"api_name": "pants.core.goals.generate_lockfiles", "line_number": 77, "usage_type": "name"}, {"api_name": "pants.backend.python.dependency_inference.rules.import_rules", "line_number": 78, "usage_type": "call"}, {"api_name": "pants.engine.rules.QueryRule", "line_number": 80, "usage_type": "call"}, {"api_name": 
"pants.backend.python.goals.lockfile.GeneratePythonLockfile", "line_number": 80, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 69, "usage_type": "attribute"}, {"api_name": "pants.testutil.rule_runner.RuleRunner", "line_number": 70, "usage_type": "name"}, {"api_name": "pants.core.goals.generate_lockfiles.GenerateLockfilesGoal", "line_number": 94, "usage_type": "argument"}, {"api_name": "textwrap.dedent", "line_number": 104, "usage_type": "call"}, {"api_name": "pants.backend.python.goals.lockfile.GeneratePythonLockfile", "line_number": 135, "usage_type": "argument"}, {"api_name": "pants.backend.python.util_rules.interpreter_constraints.InterpreterConstraints", "line_number": 136, "usage_type": "call"}, {"api_name": "pants.util.ordered_set.FrozenOrderedSet", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "359989376", "text": "#-*- coding:utf-8 -*-\n# author:isyuan\n# datetime:27/03/2019 14:57\n# software: PyCharm\n\nimport os\nimport smtplib\nimport time\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.image import MIMEImage\nimport mail_setting\n\nclass Mail:\n '''\n self.mail_host = \"smtp.sina.com\" # 设置服务器\n self.mail_user = \"xiaowang\" # 用户名\n self.mail_pass = \"XXXXX\" # 口令\n self.mail_sender = 'xiaowang@sina.com' # 发送者\n '''\n\n def __init__(self, mail_host=\"210.77.136.200\", mail_user=\"user\", mail_pass=\"pass\",\n mail_sender=\"da山\", port=465):\n # 第三方 SMTP 服务\n self.mail_host = mail_host\n self.mail_user = mail_user\n self.mail_pass = mail_pass\n self.mail_sender = mail_sender\n self.port = port\n\n\n def SendHtmlMail(self, mail_tolist, mail_subject, mail_body, fileList, mail_cclist, mail_bcclist):\n '''\n 发送Html邮件\n :param mail_tolist: 接收者邮件列表,如:['xiaoli@sina.com','xiaoMa@qq.com']\n :param mail_subject: 邮件主题\n :param mail_body: 邮件体主题内容\n :param fileList: 附件列表,就文件名列表(包含路径)\n :param mail_cclist: 抄送邮件列表,如:['xiaoli@sina.com','xiaoMa@qq.com'],默认不传\n :param mail_bcclist: 密送邮件列表,如:['xiaoli@sina.com','xiaoMa@qq.com'],默认不传\n :return:\n '''\n message = MIMEText(mail_body, _subtype='html', _charset='gb2312')\n message['Subject'] = mail_subject\n message['From'] = self.mail_sender\n if len(mail_cclist) > 0:\n message['Cc'] = \",\".join(mail_cclist)\n mail_tolist.extend(mail_cclist)\n if len(mail_bcclist) > 0:\n message['Bcc'] = \",\".join(mail_bcclist)\n mail_tolist.extend(mail_bcclist)\n\n try:\n smtpObj = smtplib.SMTP(self.mail_host, self.port)\n # smtpObj.connect(self.mail_host, 25) # 25 为 SMTP 端口号\n # smtpObj.login(self.mail_user, self.mail_pass)\n smtpObj.sendmail(self.mail_sender, mail_tolist, message.as_string())\n smtpObj.close()\n print(\"邮件发送成功\")\n except smtplib.SMTPException as e:\n print(\"Error: 无法发送邮件\")\n\n def SendMailAttach(self, mail_tolist, mail_subject, mail_body, fileList, mail_cclist):\n '''\n 发送带附件的邮件\n :param mail_tolist: 接收者邮件列表,如:['xiaoli@sina.com','xiaoMa@qq.com']\n :param mail_subject: 邮件主题\n :param mail_body: 邮件体主题内容\n :param fileList: 附件列表,就文件名列表(包含路径)\n :param mail_cclist: 抄送邮件列表,如:['xiaoli@sina.com','xiaoMa@qq.com'],默认不传\n :param mail_bcclist: 密送邮件列表,如:['xiaoli@sina.com','xiaoMa@qq.com'],默认不传\n :return:\n '''\n msg = MIMEMultipart()\n message = MIMEText(mail_body, _subtype='plain', _charset='utf-8')\n msg.attach(message)\n\n # 构造附件\n for f in fileList:\n if os.path.isfile(f):\n att = MIMEText(open(f, 'rb').read(), 'base64', 'utf-8')\n att[\"Content-Type\"] = 'application/octet-stream'\n att[\"Content-Disposition\"] = 'attachment;filename=' + os.path.basename(f)\n 
msg.attach(att)\n\n        msg['Subject'] = mail_subject\n        msg['From'] = self.mail_sender\n        msg['To'] = \",\".join(mail_tolist)\n        if len(mail_cclist) > 0:\n            msg['Cc'] = \",\".join(mail_cclist)\n            mail_tolist.extend(mail_cclist)\n        # if len(mail_bcclist) > 0:\n        #     msg['Bcc'] = \",\".join(mail_bcclist)\n        #     mail_tolist.extend(mail_bcclist)\n\n        message = ''\n        try:\n            server = smtplib.SMTP()\n            server.connect(self.mail_host)\n            server.login(self.mail_user, self.mail_pass)\n            server.sendmail(self.mail_sender, mail_tolist, msg.as_string())\n            server.close()\n            result = 'Email sent successfully'\n\n        except smtplib.SMTPException as e:\n            # print \"Error: unable to send email\", e\n            message = 'Error: unable to send email:'\n        return message\n\n    def SendMail(self, mail_subject, mail_body, mail_tolist,mail_cclist):\n        '''\n        Send a plain-text email\n        :param mail_tolist: list of recipient addresses, e.g. ['xiaoli@sina.com','xiaoMa@qq.com']\n        :param mail_subject: email subject\n        :param mail_body: main body content of the email\n        :param fileList: attachment list, i.e. a list of file names (including paths)\n        :param mail_cclist: CC address list, e.g. ['xiaoli@sina.com','xiaoMa@qq.com'], omitted by default\n        :param mail_bcclist: BCC address list, e.g. ['xiaoli@sina.com','xiaoMa@qq.com'], omitted by default\n        :return:\n        '''\n        message = MIMEText(mail_body, _subtype='plain', _charset='utf-8')\n        message['Subject'] = mail_subject\n        message['From'] = self.mail_sender\n        if mail_tolist:\n            message['To'] = \",\".join(mail_tolist)\n        if len(mail_cclist) > 0:\n            message['Cc'] = \",\".join(mail_cclist)\n            mail_tolist.extend(mail_cclist)\n\n        result = ''\n        try:\n            server = smtplib.SMTP()\n            server.connect(self.mail_host)\n            server.login(self.mail_user,self.mail_pass)\n            server.sendmail(self.mail_sender, mail_tolist, message.as_string())\n            server.close()\n            result = 'Email sent successfully'\n            # print \"Email sent successfully\"\n        except smtplib.SMTPException as e:\n            result = 'Error: unable to send email'\n        return result\n\n    def test(self,mail_body,mail_subject,mail_sendto_user,fileName):\n\n        fileList = []\n        fileList.append(fileName)\n\n        mail_tolist = []\n        mail_tolist.append(mail_sendto_user)\n\n        # multiple recipients, separated by commas\n        # cc_tolist = ['xx','dd']\n        cc_tolist =[]\n        mail_bcclist = []\n\n        # result = self.SendMail( mail_subject, mail_body, mail_tolist,cc_tolist)\n        result = self.SendMailAttach(mail_tolist,mail_subject, mail_body,fileList, cc_tolist)\n        return result\n\n\n# setobj = mail_setting.mail_setting()\n# m = Mail(setobj.mail_host,setobj.mail_user,setobj.mail_pass,setobj.mail_send_user,setobj.port)\n# # result = m.test('message 1',u'a small test',setobj.mail_receive_user)\n# fileName=\"D:\\\\work\\\\python36_crawl\\\\src\\\\2018-07-02-14-01-55.csv\"\n# result = m.test('message 1',u'a small test',setobj.mail_receive_user,fileName)\n# print (result)", "sub_path": "AutoFramework/utils/sendEmail1.py", "file_name": "sendEmail1.py", "file_ext": "py", "file_size_in_byte": 6703, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "email.mime.text.MIMEText", "line_number": 43, "usage_type": "call"}, {"api_name": "smtplib.SMTP", "line_number": 54, "usage_type": "call"}, {"api_name": "smtplib.SMTPException", "line_number": 60, "usage_type": "attribute"}, {"api_name": "email.mime.multipart.MIMEMultipart", "line_number": 74, "usage_type": "call"}, {"api_name": "email.mime.text.MIMEText", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "email.mime.text.MIMEText", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": 
"attribute"}, {"api_name": "smtplib.SMTP", "line_number": 98, "usage_type": "call"}, {"api_name": "smtplib.SMTPException", "line_number": 105, "usage_type": "attribute"}, {"api_name": "email.mime.text.MIMEText", "line_number": 121, "usage_type": "call"}, {"api_name": "smtplib.SMTP", "line_number": 132, "usage_type": "call"}, {"api_name": "smtplib.SMTPException", "line_number": 139, "usage_type": "attribute"}]} +{"seq_id": "301639469", "text": "\"\"\"\nThe dreammarket sink module provides the ability to scrape raw data (HTML) from\nthe onion site that is hosting it, then (if specified) save it to disk, send it\nthrough an ingestionion point, and save it in a datastore.\n\"\"\"\nimport os\nimport logging\n\nfrom pyvirtualdisplay import Display\n\nfrom dminer.ingestion.dreammarket import DreammarketParser\nfrom dminer.stores.interfaces import ElasticsearchInterface, STDOutInterface\nfrom dreammarket import *\n\nlogger = logging.getLogger(__name__)\n\n\ndef prepare_cli(parser):\n \"\"\"\n Prepares the CLI subgroup parser by adding arguments specific to the\n dreammarket sink. It also sets the entry point for the CLI to use when\n specifying this subgroup.\n \"\"\"\n # Sink related arguments\n parser.add_argument(\n \"-u\", \"--dreammarket-username\",\n default=os.environ.get(\"DMINER_SINK_DREAMMARKET_USERNAME\", None),\n help=\"\"\"\n Specifies the username to use for the login form on Dream Market. It is\n also able to be specified as an environment variable: DMINER_SINK_DREAMMARKET_USERNAME.\n This is required for this sink module.\n \"\"\"\n )\n parser.add_argument(\n \"-p\", \"--dreammarket-password\",\n default=os.environ.get(\"DMINER_SINK_DREAMMARKET_PASSWORD\", None),\n help=\"\"\"\n Specifies the password to use for the login form on Dream Market. It is\n also able to be specified as an environment variable: DMINER_SINK_DREAMMARKET_PASSWORD.\n This is a required for this sink module.\n \"\"\"\n )\n parser.add_argument(\n \"-k\", \"--dbc-access-key\",\n default=os.environ.get(\"DMINER_DBC_ACCESS_KEY\", None),\n help=\"\"\"\n Specifies the access key to use for deathbycaptcha. It is also able to\n be specified as an environment variable: DMINER_DBC_ACCESS_KEY.\n This is required for this sink module.\n \"\"\"\n )\n parser.add_argument(\n \"-s\", \"--dbc-secret-key\",\n default=os.environ.get(\"DMINER_DBC_SECRET_KEY\", None),\n help=\"\"\"\n Specifies the secret key to use for deathbycaptcha. It is also able to\n be specified as an environment variable: DMINER_DBC_SECRET_KEY.\n This is required for this sink module.\n \"\"\"\n )\n parser.add_argument(\n \"--onion-url\",\n default=os.environ.get(\n \"DMINER_SINK_DREAMMARKET_ONION_URL\", \"http://lchudifyeqm4ldjj.onion\"\n ),\n help=\"\"\"\n Specifies the onion URL to use for this marketplace. It is also able to\n be specified as an environment variable: DMINER_SINK_DREAMMARKET_ONION_URL.\n This is required for this sink module. The default is: %(default)s.\n \"\"\"\n )\n\n url_category_exclusive_group = parser.add_mutually_exclusive_group()\n url_category_exclusive_group.add_argument(\n \"--url-file\",\n default=None,\n help=\"\"\"\n Specifies the file to use for defining URLs to be consumed by the\n scraper. 
If specified, only the URLs in the file will be scraped, and\n        the sink will exit after all URLs from the file have been exhausted.\n        \"\"\"\n    )\n    url_category_exclusive_group.add_argument(\n        \"--category\",\n        default=\"digital_goods.hacking\",\n        help=\"\"\"\n        Specifies the category to pull URLs from for consumption by the\n        scraper. If specified, URLs will be pulled dynamically, and the\n        category specified will be used to look up where to pull the URLs.\n        The default is '%(default)s'.\n        \"\"\"\n    )\n\n    parser.add_argument(\n        \"--daemonize\",\n        action=\"store_true\",\n        help=\"\"\"\n        If specified, the scraper will be put into a daemon mode, which will\n        repeatedly run, refreshing URLs to scrape based on the CLI options\n        provided (either --category or --url-file).\n        \"\"\"\n    )\n    parser.add_argument(\n        \"--request-interval\",\n        default=15, type=int,\n        help=\"\"\"\n        The request interval is the maximum amount of time to wait in between\n        requests for each page being scraped. The actual amount of time in\n        between requests is random, ranging between 0 and the interval\n        specified. The default is %(default)i seconds.\n        \"\"\"\n    )\n    parser.add_argument(\n        \"--request-retries\",\n        default=5, type=int,\n        help=\"\"\"\n        The request retry metric is used to determine how many attempts should\n        be made to scrape a particular page before skipping the URL. The\n        default is %(default)i attempts.\n        \"\"\"\n    )\n    parser.add_argument(\n        \"--request-timeout\",\n        default=5, type=int,\n        help=\"\"\"\n        The request timeout metric is used to determine how long a request\n        should persist without a response. The default is %(default)i seconds.\n        \"\"\"\n    )\n    parser.add_argument(\n        \"--save-to-directory\",\n        default=None,\n        help=\"\"\"\n        If specified, the sink will attempt to save all scraped HTML files to\n        the specified directory.\n        \"\"\"\n    )\n    parser.add_argument(\n        \"-v\", \"--verbosity\",\n        default=\"info\",\n        choices=[\"debug\", \"info\", \"warn\", \"error\"],\n        help=\"\"\"\n        Controls the verbosity of the ingestion point. Default is %(default)s.\n        \"\"\"\n    )\n\n    # Flag to also perform ingestion\n    parser.add_argument(\n        \"--ingest\",\n        action=\"store_true\",\n        help=\"\"\"\n        If specified, the sink will pass all scraped HTML files to the Dream\n        Market ingestion point, with the ingestion point being configured to use\n        the specified datastore interface.\n        \"\"\"\n    )\n\n    # Datastore related arguments\n    parser.add_argument(\n        \"--datastore\",\n        default=\"stdout\",\n        choices=[\"stdout\", \"elasticsearch\"],\n        help=\"\"\"\n        Specify the datastore to use during ingestion. The default datastore is\n        %(default)s.\n        \"\"\"\n    )\n    parser.add_argument(\n        \"--datastore-host\",\n        default=\"localhost\",\n        help=\"\"\"\n        Specify the datastore remote host. The default host is %(default)s.\n        \"\"\"\n    )\n    parser.add_argument(\n        \"--datastore-port\",\n        default=9200,\n        help=\"\"\"\n        Specify the datastore remote port. The default port is %(default)s.\n        \"\"\"\n    )\n    parser.set_defaults(func=entry)\n\n\ndef entry(arguments):\n    \"\"\"\n    The entry point for the dreammarket sink CLI interface. 
This defines the\n    logic around the usage of command line arguments and the dreammarket sink in\n    order to perform scraping, ingestion, and storage related functions.\n    \"\"\"\n    logger.setLevel(arguments.verbosity.upper())\n    if not arguments.dreammarket_username:\n        logger.error(\"This sink requires a username to be specified through CLI or environment variable.\")\n        raise SystemExit()\n    if not arguments.dreammarket_password:\n        logger.error(\"This sink requires a password to be specified through CLI or environment variable.\")\n        raise SystemExit()\n\n    if not arguments.dbc_access_key:\n        logger.error(\"This sink requires a deathbycaptcha access key to be specified through CLI or environment variable.\")\n        raise SystemExit()\n    if not arguments.dbc_secret_key:\n        logger.error(\"This sink requires a deathbycaptcha secret key to be specified through CLI or environment variable.\")\n        raise SystemExit()\n\n\n    display = Display(visible=0, size=(1366, 768))\n    display.start()\n    sink = DreammarketSink(\n        arguments.dreammarket_username, arguments.dreammarket_password,\n        arguments.dbc_access_key, arguments.dbc_secret_key,\n        url_file=arguments.url_file,\n        save_to_directory=arguments.save_to_directory,\n        onion_url=arguments.onion_url,\n        request_interval=arguments.request_interval,\n        request_retries=arguments.request_retries,\n        request_timeout=arguments.request_timeout,\n        category=arguments.category\n    )\n    sink.logger = logger\n\n    if arguments.ingest:\n        if arguments.datastore == \"stdout\":\n            store = STDOutInterface()\n\n            parser = DreammarketParser(datastore=store)\n            parser.parse(scrape_results=sink.scrape())\n\n        elif arguments.datastore == \"elasticsearch\":\n            store = ElasticsearchInterface(\n                host=arguments.datastore_host,\n                port=arguments.datastore_port\n            )\n\n            parser = DreammarketParser(datastore=store)\n            parser.parse(\n                scrape_results=sink.scrape(\n                    daemon=arguments.daemonize\n                )\n            )\n    else:\n        list(sink.scrape())\n    display.stop()\n", "sub_path": "dminer/sinks/dreammarket/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 8668, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "logging.getLogger", "line_number": 15, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 27, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 36, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 45, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 54, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 54, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 63, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pyvirtualdisplay.Display", "line_number": 207, "usage_type": "call"}, {"api_name": "dminer.stores.interfaces.STDOutInterface", "line_number": 224, "usage_type": "call"}, {"api_name": "dminer.ingestion.dreammarket.DreammarketParser", "line_number": 226, "usage_type": "call"}, {"api_name": "dminer.stores.interfaces.ElasticsearchInterface", "line_number": 230, "usage_type": "call"}, {"api_name": "dminer.ingestion.dreammarket.DreammarketParser", "line_number": 235, "usage_type": "call"}]} +{"seq_id": "354817780", "text": "import xlrd as 
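xl\nimport re, json, io\n\n# Parses an .xls class schedule into JSON records: date, order, subject name, subject type, and professor.\n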
\ndef special_match(strg, search=re.compile(r'[^0-9.]').search):\n    return not bool(search(strg))\n\ndef unify_row(row):\n    temprow = row\n    if temprow:\n        if not temprow.strip()[0].isdigit():\n            temprow = temprow.replace(' -', temprow[:temprow.find(';') + 2], 1).strip()\n            temprow = temprow[temprow.find(';') + 1:]\n        while ' -' in temprow:\n            temprow = temprow.replace(' -', temprow[temprow.find('г.') + 2:temprow.find(';', temprow.find('г.'))+1])\n    print(temprow)\n    return temprow\n\ndef get_dates(dates):\n    #dates = dates[:len(dates)\n    temp = list(filter(None, dates.split(';')))\n    temp2 = list()\n    date_arr = []\n    cur_day, cur_month, cur_year = '', '', ''\n    for each in temp:\n        temp2.append(list(filter(None, each.strip().split(','))))\n    if temp2:\n        for x in temp2[::-1]:\n            for y in x[::-1]:\n                temp = y\n                cur_day = ''\n                if special_match(temp):\n                    if '.' in temp:\n                        if temp.count('.') == 2:\n                            cur_year = temp[temp.rfind('.'):]\n                            temp = temp[:temp.rfind('.')]\n                        if temp.count('.') == 1:\n                            cur_month = temp[temp.rfind('.'):]\n                            temp = temp[:temp.rfind('.')]\n                    if '.' not in temp:\n                        cur_day = temp\n                    date_arr.append(str(cur_day) + str(cur_month) + str(cur_year))\n    # print(temp)\n    return sorted(date_arr)\n\n\nfile = xl.open_workbook(\"/home/inteldoter/Downloads/schedule.xls\", encoding_override=\"cp1252\")\nsheet = file.sheet_by_index(0)\nrows = []\noutput = []\nfor rownum in range(11, sheet.nrows):\n    rows.append(sheet.row_values(rownum))\nfor row in rows:\n    curpair = int(row[1]) if row[1] else 0\n    curlist = []\n    cursubj, cursubjtype, curprof = '', '', ''\n    currow = str(row[2])\n    if 'День самоподготовк' in currow:\n        currow = ''\n    currow = currow.replace('б; ', 'б ##').replace('.; ', '. ##')\n    # '##' markers inserted above separate multiple entries within one cell\n    if '##' not in currow:\n        curlist = [unify_row(currow)]\n    else:\n        for each in currow.split('##'):\n            curlist.append(unify_row(each))\n    for each in curlist:\n        temp = each\n        while temp:\n            if ':' in temp:\n                curprof = temp[temp.rfind(':') + 2:].strip()\n                temp = temp[:temp.rfind(':')]\n            curdates = (get_dates(temp[:temp.find('г.')]))\n            temp = temp[temp.find('г.') + 2:]\n            cursubj = temp[:temp.find(';')].strip()\n            temp = temp[temp.find(';') + 1:]\n            temp = temp.replace('теория; практика', 'теория, практика')\n            cursubjtype = temp[:temp.find(';')].strip() if ';' in temp else temp.strip()\n            temp = temp[temp.find(';'):]\n            if temp and curpair:\n                # print(curpair, curprof, curdates, cursubj, cursubjtype)\n                for each in curdates:\n                    output.append({\"date\": each, \"order\": curpair, \"name\": cursubj, \"subjType\": cursubjtype, \"subjProf\": curprof})\n\njson_string = json.dumps({\"subjArr\": output}, ensure_ascii=False).encode('utf-8')\nwith io.open('data.json', 'w', encoding='utf8') as json_file:\n    json.dump({\"subjArr\": output}, json_file, ensure_ascii=False)\n\n", "sub_path": "source.py", "file_name": "source.py", "file_ext": "py", "file_size_in_byte": 3422, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "re.compile", "line_number": 5, "usage_type": "call"}, {"api_name": "xlrd.open_workbook", "line_number": 47, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 84, "usage_type": "call"}, {"api_name": "io.open", "line_number": 85, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 86, "usage_type": "call"}]} +{"seq_id": "508542716", "text": "from django.conf.urls import patterns, include, url\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom kiteclub.views import HomeView, Index6View, 
PrivacyView, LocalizationView, SitemapView\nfrom events.views import EventsView, SingleEventView, AboutView, HistoryView, ClubEventsView, FaqsView\nfrom blog.views import ArticlesView, SingleArticleView\nfrom lessons.views import LessonsView, SingleLessonView\nfrom video.views import VideosView, SingleVideoView\nfrom contacts.views import MailView\nfrom gallery.views import GalleryView\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^$', HomeView.as_view()),\n url(r'^mail/$', MailView.as_view()),\n url(r'^mail/feedback/$', MailView.as_view()),\n url(r'^about/$', AboutView.as_view()),\n url(r'^faqs/$', FaqsView.as_view()),\n url(r'^history/$', HistoryView.as_view()),\n url(r'^index6/$', Index6View.as_view()),\n url(r'^events/$', EventsView.as_view()),\n url(r'^club-events/$', ClubEventsView.as_view()),\n url(r'^events/([0-9a-zA-Z-]+)$', SingleEventView.as_view()),\n url(r'^articles/$', ArticlesView.as_view()),\n url(r'^articles/([0-9a-zA-Z-]+)$', SingleArticleView.as_view()),\n url(r'^lessons/$', LessonsView.as_view()),\n url(r'^lessons/([0-9a-zA-Z-]+)$', SingleLessonView.as_view()),\n url(r'^videos/$', VideosView.as_view()),\n url(r'^videos/([0-9a-zA-Z-]+)$', SingleVideoView.as_view()),\n url(r'^gallery/$', GalleryView.as_view()),\n url(r'^privacy/$', PrivacyView.as_view()),\n url(r'^sitemap.xml$', SitemapView.as_view()),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^i18n/(\\w+)/', LocalizationView.as_view()),\n url(r'^ckeditor/', include('ckeditor.urls')),\n\n)\n\nif settings.DEBUG:\n urlpatterns += patterns('',\n url(r'^media/(?P.*)$', 'django.views.static.serve', {\n 'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),\n url(r'^static/(?P.*)$', 'django.views.static.serve', {\n 'document_root': settings.STATIC_ROOT, 'show_indexes': True}),\n )\n", "sub_path": "kiteclub/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 2050, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "django.contrib.admin.autodiscover", "line_number": 11, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 11, "usage_type": "name"}, {"api_name": "django.conf.urls.patterns", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "kiteclub.views.HomeView.as_view", "line_number": 14, "usage_type": "call"}, {"api_name": "kiteclub.views.HomeView", "line_number": 14, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "contacts.views.MailView.as_view", "line_number": 15, "usage_type": "call"}, {"api_name": "contacts.views.MailView", "line_number": 15, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "contacts.views.MailView.as_view", "line_number": 16, "usage_type": "call"}, {"api_name": "contacts.views.MailView", "line_number": 16, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "events.views.AboutView.as_view", "line_number": 17, "usage_type": "call"}, {"api_name": "events.views.AboutView", "line_number": 17, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "events.views.FaqsView.as_view", "line_number": 18, "usage_type": "call"}, {"api_name": "events.views.FaqsView", "line_number": 18, "usage_type": "name"}, {"api_name": 
"django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "events.views.HistoryView.as_view", "line_number": 19, "usage_type": "call"}, {"api_name": "events.views.HistoryView", "line_number": 19, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 20, "usage_type": "call"}, {"api_name": "kiteclub.views.Index6View.as_view", "line_number": 20, "usage_type": "call"}, {"api_name": "kiteclub.views.Index6View", "line_number": 20, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 21, "usage_type": "call"}, {"api_name": "events.views.EventsView.as_view", "line_number": 21, "usage_type": "call"}, {"api_name": "events.views.EventsView", "line_number": 21, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}, {"api_name": "events.views.ClubEventsView.as_view", "line_number": 22, "usage_type": "call"}, {"api_name": "events.views.ClubEventsView", "line_number": 22, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}, {"api_name": "events.views.SingleEventView.as_view", "line_number": 23, "usage_type": "call"}, {"api_name": "events.views.SingleEventView", "line_number": 23, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 24, "usage_type": "call"}, {"api_name": "blog.views.ArticlesView.as_view", "line_number": 24, "usage_type": "call"}, {"api_name": "blog.views.ArticlesView", "line_number": 24, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}, {"api_name": "blog.views.SingleArticleView.as_view", "line_number": 25, "usage_type": "call"}, {"api_name": "blog.views.SingleArticleView", "line_number": 25, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 26, "usage_type": "call"}, {"api_name": "lessons.views.LessonsView.as_view", "line_number": 26, "usage_type": "call"}, {"api_name": "lessons.views.LessonsView", "line_number": 26, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 27, "usage_type": "call"}, {"api_name": "lessons.views.SingleLessonView.as_view", "line_number": 27, "usage_type": "call"}, {"api_name": "lessons.views.SingleLessonView", "line_number": 27, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 28, "usage_type": "call"}, {"api_name": "video.views.VideosView.as_view", "line_number": 28, "usage_type": "call"}, {"api_name": "video.views.VideosView", "line_number": 28, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 29, "usage_type": "call"}, {"api_name": "video.views.SingleVideoView.as_view", "line_number": 29, "usage_type": "call"}, {"api_name": "video.views.SingleVideoView", "line_number": 29, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 30, "usage_type": "call"}, {"api_name": "gallery.views.GalleryView.as_view", "line_number": 30, "usage_type": "call"}, {"api_name": "gallery.views.GalleryView", "line_number": 30, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 31, "usage_type": "call"}, {"api_name": "kiteclub.views.PrivacyView.as_view", "line_number": 31, "usage_type": "call"}, {"api_name": "kiteclub.views.PrivacyView", "line_number": 31, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 32, "usage_type": "call"}, {"api_name": "kiteclub.views.SitemapView.as_view", "line_number": 32, "usage_type": "call"}, {"api_name": "kiteclub.views.SitemapView", "line_number": 32, 
"usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 33, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 33, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 33, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 33, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 34, "usage_type": "call"}, {"api_name": "kiteclub.views.LocalizationView.as_view", "line_number": 34, "usage_type": "call"}, {"api_name": "kiteclub.views.LocalizationView", "line_number": 34, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 35, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 35, "usage_type": "call"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 39, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 39, "usage_type": "name"}, {"api_name": "django.conf.urls.patterns", "line_number": 40, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 41, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 42, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 42, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 43, "usage_type": "call"}, {"api_name": "django.conf.settings.STATIC_ROOT", "line_number": 44, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 44, "usage_type": "name"}]} +{"seq_id": "650496566", "text": "#!/home/cimatori/installed/anaconda/bin/python\n\"\"\"\nCompute statistics of plus/minus increments as in Zhou & Xia 2002\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as ppl\n\nimport h5py\n\nfrom MyLib import Statistics, hist2plot\n\n# Load paramters\nimport ConfigPM\nreload(ConfigPM)\nfrom ConfigPM import *\n\nppl.close('all')\n\nsets = (range(72,108), range(108,144), range(0,36), range(36,72))\n\n# Increment labels\ndeltar = '$\\\\Delta \\\\theta_r$'\nspacing = '$r$ $\\\\mathrm{[m]}$'\n\nF = ppl.figure(figsize=(15,11))\nF.subplots_adjust(left=0.1, bottom=0.1, right=0.98, top=0.97, \\\n wspace=0, hspace=0)\n\nfor nset,Thms in enumerate(sets):\n setName = '{}-{}'.format(Thms[0]+1,Thms[-1]+1)\n TideFile = OutDir+'results/PMIncrements_{}_StepX_{}_day_{}_{}.npz' \\\n .format(setName,StepX,Start,End)\n print ('Load previously computed results for plotting only.')\n Data = np.load(TideFile)\n for k,v in Data.iteritems():\n exec('{}=v'.format(k))\n del Data\n\n # Define colors\n colorsdT = ppl.cm.get_cmap('Dark2',ndTs)(range(ndTs))\n\n ax = F.add_subplot(2,2,nset+1)\n\n pl = []; labs = []\n for p in xrange(nT):\n if namT[p]==\"down\":\n mfcol = colorsT[p]\n else:\n mfcol = 'none'\n s, = ax.plot(dXs, skPlus[:,p], \\\n marker=markPM['Plus'], c=colorsT[p], mec=colorsT[p], \\\n mfc=mfcol, **pStyle)\n pl.append(s)\n labs.append('$\\\\Delta_r\\\\theta^+$, {}'.format(namT[p]))\n s, = ax.plot(dz, skPlusZ[p], \\\n marker=markPM['Plus'], c=colorsT[p], mec=colorsT[p], \\\n mfc=mfcol, **p2Style)\n pl.append(s)\n labs.append('$\\\\Delta_z\\\\theta^+$, {}'.format(namT[p]))\n s, = ax.plot(dXs, skMinus[:,p], \\\n marker=markPM['Minus'], c=colorsT[p], mec=colorsT[p], \\\n mfc=mfcol, **pStyle)\n pl.append(s)\n labs.append('$\\\\Delta_r\\\\theta^-$, {}'.format(namT[p]))\n s, = ax.plot(dz, skMinusZ[p], \\\n marker=markPM['Minus'], c=colorsT[p], mec=colorsT[p], \\\n mfc=mfcol, **p2Style)\n pl.append(s)\n labs.append('$\\\\Delta_z\\\\theta^-$, 
{}'.format(namT[p]))\n    \n    # Mooring section name\n    ax.text(0.85,0.85, setLabels[setName], size='xx-large', transform=ax.transAxes)\n\n    ax.set_xscale('log')\n\n    ax.set_xlim(0.1,900)\n    ax.set_ylim(0,12)\n\n    if nset==2:\n        ax.legend(pl, labs, numpoints=1, fontsize=16, ncol=2, loc='lower left')\n\n    if nset in (0,2):\n        ax.set_ylabel('$\\\\mu_3\\\\left(\\\\Delta_r\\\\theta^\\pm\\\\right)$, ' + \\\n            '$\\\\mu_3\\\\left(\\\\Delta_z\\\\theta^\\pm\\\\right)$', \\\n            fontsize='xx-large')\n    else:\n        ax.set_yticklabels('')\n    if nset>1:\n        ax.set_xlabel(spacing, fontsize='xx-large')\n    else:\n        ax.set_xticklabels('')\n\nF.savefig(OutDir+'figures/Taylor_StepX_{}' \\\n    .format(StepX) + \\\n    '/Skewness_PMincrm_summary_day_{}_{}.tif' \\\n    .format(Start,End), dpi=300)\n", "sub_path": "LIS131/TemperatureDissipation/plot_PM_summary.py", "file_name": "plot_PM_summary.py", "file_ext": "py", "file_size_in_byte": 3058, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "matplotlib.pyplot.close", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "numpy.load", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm.get_cmap", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 41, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "106786342", "text": "from datetime import datetime\nimport pandas as pd\n\n\ndef convert_date_to_formatted_str(date, frequency):\n    \"\"\"\n    Converts datetime to a string based on the frequency passed.\n    \"\"\"\n    if frequency == 'monthly':\n        return datetime.strftime(date, '%B %Y')\n    elif frequency == 'daily':\n        return datetime.strftime(date, '%Y-%m-%d')\n    else:\n        return datetime.strftime(date, '%Y-%m-%d %H:%M')\n\n\ndef date_to_formatted_str(date):\n    \"\"\"\n    Converts datetime to a string\n    \"\"\"\n    return datetime.strftime(date, '%Y-%m-%d %H:%M')\n\n\ndef str_to_date(st):\n    \"\"\"\n    Converts a string to datetime\n    \"\"\"\n    return datetime.strptime(st, '%Y-%m-%dT%H:%M:%S.%fZ')\n\n\ndef date_to_str(date):\n    \"\"\"\n    Converts datetime to a string\n    \"\"\"\n    return datetime.strftime(date, '%Y-%m-%dT%H:%M:%S.%fZ')\n\n\ndef date_to_str2(date):\n    \"\"\"\n    Converts datetime to a string\n    \"\"\"\n    return datetime.strftime(date, '%Y-%m-%dT%H:%M:%SZ')\n\n\ndef str_to_date_find(st):\n    \"\"\"\n    Converts a string of a different format to datetime\n    \"\"\"\n    return datetime.strptime(st, '%Y-%m-%dT%H:%M:%SZ')\n\n\ndef generate_datetime(date, time):\n    if date is None or date == \"\":\n        return None\n    else:\n        if time is None or time == \"\":\n            time = \"00:00\"\n        date_time = date+\"T\"+time+\":00Z\"\n        return date_time\n\n\ndef set_pm25_category_background(pm25_conc_value):\n    category_color = \"\"\n    if pm25_conc_value > 0.0 and pm25_conc_value <= 12.0:\n        category_color = '#45e50d'\n    elif pm25_conc_value > 12.0 and pm25_conc_value <= 35.4:\n        category_color = '#f8fe28'\n    elif pm25_conc_value > 35.4 and pm25_conc_value <= 55.4:\n        category_color = '#ee8310'\n    elif pm25_conc_value > 55.4 and pm25_conc_value <= 150.4:\n        category_color = '#fe0000'\n    elif pm25_conc_value > 150.4 and pm25_conc_value <= 250.4:\n        category_color = '#8639c0'\n    elif pm25_conc_value > 250.4 and pm25_conc_value <= 
500.4:\n        category_color = '#81202e'\n    else:\n        category_color = '#808080'\n\n    return category_color\n\n\ndef assign_color_to_pollutant_category(pollutant_category):\n    category_color = \"\"\n    if pollutant_category == 'Good':\n        category_color = '#45e50d'\n    elif pollutant_category == 'Moderate':\n        category_color = '#f8fe28'\n    elif pollutant_category == 'UH4SG':\n        category_color = '#ee8310'\n    elif pollutant_category == 'Unhealthy':\n        category_color = '#fe0000'\n    elif pollutant_category == 'Very Unhealthy':\n        category_color = '#8639c0'\n    elif pollutant_category == 'Hazardous':\n        category_color = '#81202e'\n    else:\n        category_color = '#808080'\n\n    return category_color\n\n\ndef flattencolumns(df1, cols):\n    df = pd.concat([pd.DataFrame(df1[x].values.tolist()).add_prefix(x)\n                    for x in cols], axis=1)\n    return pd.concat([df, df1.drop(cols, axis=1)], axis=1)\n", "sub_path": "pipeline/cloud-functions/python/helpers.py", "file_name": "helpers.py", "file_ext": "py", "file_size_in_byte": 2897, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "datetime.datetime.strftime", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 10, "usage_type": "name"}, {"api_name": "datetime.datetime.strftime", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 12, "usage_type": "name"}, {"api_name": "datetime.datetime.strftime", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 14, "usage_type": "name"}, {"api_name": "datetime.datetime.strftime", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 21, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "name"}, {"api_name": "datetime.datetime.strftime", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 35, "usage_type": "name"}, {"api_name": "datetime.datetime.strftime", "line_number": 42, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 42, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 49, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 49, "usage_type": "name"}, {"api_name": "pandas.concat", "line_number": 103, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 103, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 105, "usage_type": "call"}]} +{"seq_id": "5223071", "text": "import numpy as np\r\nimport pandas as pd\r\nimport KNNLearner as knn\r\nimport LinRegLearner as ll\r\nimport BagLearner as bl\r\nimport datetime as dt\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn import linear_model\r\nfrom util import get_data, plot_data\r\n\r\n# get price data: Sine, IBM\r\nstart_date = dt.datetime(2007,12,31)\r\nend_date = dt.datetime(2009,12,31)\r\nsymbols = ['IBM','SINE_FAST','SINE_SLOW','GOOG','AAPL']\r\ndates = pd.date_range(start_date, end_date)\r\nprices_all = get_data(symbols, dates)\r\n\r\ntag = 'IBM'\r\npibm = prices_all[tag]\r\n\r\n# construct features X\r\ndef get_feature(pibm):\r\n    indates = dates[1:]\r\n    sma = pibm.rolling(window = 20, min_periods=0)\r\n    bbup = sma.mean() + 2*sma.std() \r\n    bblow = sma.mean() - 2*sma.std() \r\n    bbvals = (pibm[1:] - sma.mean()[1:])/(4*sma.std()[1:])\r\n    vtl = sma.std()[1:]/sma.mean()[1:]*8\r\n    #mmtn1 = 
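pibm.values[1:]/pibm.values[:-1]-1 \n    # momentum features over 1- to 4-day horizons are left commented out; only the 5-day momentum (x5) is used below\n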
pibm.values[2:]/pibm.values[:-2]-1\n #mmtn3 = pibm.values[3:]/pibm.values[:-3]-1\n #mmtn4 = pibm.values[4:]/pibm.values[:-4]-1\n mmtn5 = pibm.values[5:]/pibm.values[:-5]-1\n X = pd.DataFrame({'x0':bbvals[4:-5], 'vtl':vtl[4:-5],'x5':mmtn5[:-5]})\n return X, bbvals[4:-5]\n\n# construct Y\ndef get_Y(pibm):\n Y = pibm.values[5:] \n Y = Y[5:]/Y[:-5] - 1\n return Y\n\ndef trade_naive(pfl):\n for idx in range(pfl.shape[0]-1):\n if pfl['pred'].ix[idx] < pfl['pred'].ix[idx+1]:\n if pfl['shares'].ix[idx] <= 0:\n pfl['cash'].ix[idx:] = pfl['cash'].ix[idx] - pfl['price'].ix[idx]*(100 - pfl['shares'].ix[idx])\n pfl['shares'].ix[idx:] = 100\n if pfl['pred'].ix[idx] > pfl['pred'].ix[idx+1]:\n if pfl['shares'].ix[idx] >= 0:\n pfl['cash'].ix[idx:] = pfl['cash'].ix[idx] + pfl['price'].ix[idx]*(100 + pfl['shares'].ix[idx])\n pfl['shares'].ix[idx:] = -100\n pv = pfl['price']*pfl['shares'] + pfl['cash']\n return pv \n\ndef trade(pfl):\n if pfl['price'].ix[0] < pfl['pema40'].ix[0] and pfl['pema40'].ix[0] < pfl['pema40'].ix[5]:\n pfl['shares'][:] = 100\n pfl['cash'][:] = pfl['cash'].ix[0] - pfl['price'].ix[0]*100\n elif pfl['price'].ix[0] > pfl['pema40'].ix[0] and pfl['pema40'].ix[0] > pfl['pema40'].ix[5]:\n pfl['shares'][:] = -100\n pfl['cash'][:] = pfl['cash'].ix[0] + pfl['price'].ix[0]*100\n\n sigs = pfl['price'].values - pfl['ema40'].values\n for idx in range(1, pfl.shape[0]):\n if sigs[idx]*sigs[idx-1] < 0:\n if sigs[idx] > 0 and pfl['shares'].ix[idx] <= 0:\n pfl['cash'].ix[idx:] = pfl['cash'].ix[idx] - pfl['price'].ix[idx]*(100 - pfl['shares'].ix[idx])\n pfl['shares'].ix[idx:] = 100\n if sigs[idx] < 0 and pfl['shares'].ix[idx] >= 0:\n pfl['cash'].ix[idx:] = pfl['cash'].ix[idx] + pfl['price'].ix[idx]*(100 + pfl['shares'].ix[idx])\n pfl['shares'].ix[idx:] = -100\n return pfl\n\ndef train(X, Y):\n kl = knn.KNNLearner()\n Ypred = np.zeros(Y.size)\n Ypred[:5] = Y[:5]\n for i in range(5, X.shape[0]):\n kl.addEvidence(X.values[:i], Y[:i])\n Ypred[i] = kl.query(X.values[i])[0]\n return Ypred, kl\n\n \n \n#----------------------In-sample test-----------------------------#\nX, _ = get_feature(pibm)\nY = get_Y(pibm)\n\nYpred, kl = train(X, Y)\n# convert predicted Y back to price, in-sample backtest\nppred = pibm.values[5:-5]*(Ypred + 1)\n\npdiff = pd.DataFrame(index = pibm.index[10:], data = {'price':pibm.values[10:], 'pred':ppred})\nplot_data(pdiff)\n\nppred = pd.Series(index = pibm.index[10:], data = ppred)# convert numpy array to pandas.Series\nema40 = pibm.ewm(span = 40, min_periods=0).mean()\npema40 = pd.concat((pibm[:10],ppred)).ewm(span = 40, min_periods=0).mean()\n# initial portfolio\npfl = pd.DataFrame({'price':pibm[10:], 'ema40':ema40[10:], 'pema40':pema40[10:], 'shares':np.zeros(pibm.size-10), 'cash':np.ones(pibm.size-10)*10000})\n\n\n# trading\npfl = trade(pfl)\npv = pfl['price']*pfl['shares'] + pfl['cash']\npspy = prices_all['SPY'][pfl.index]\npfl_vs_spy = pd.DataFrame(index = pfl.index, data = {'my_portval':pv/pv.ix[0], 'SPY':pspy/pspy.ix[0]})\nplot_data(pfl_vs_spy, title = \"My_Portfolio vs SPY\", ylabel = \"Accumulative Return\")\n\n\n#------------------------Out-Sample test---------------------------# \ntsd = dt.datetime(2009,12,31)\nted = dt.datetime(2011,12,31)\nsymbols = [tag]\ndates = pd.date_range(tsd, ted)\ntprices = get_data(symbols, dates)\ntpibm = tprices[tag]\n\ntX, _ = get_feature(tpibm)\n# compare to the true price\ntYpred = kl.query(tX.values)\ntppred = tpibm.values[5:-5]*(tYpred + 1)\ntppred = pd.Series(index = tpibm.index[10:], data = tppred)# convert numpy array to 
#tppred = tpibm.values[5:-5]*(tX.values.dot(clf.coef_.T) + clf.intercept_ + 1)\ntema40 = tpibm.ewm(span = 40, min_periods=0).mean()\ntpema40 = pd.concat((tpibm[:10],tppred)).ewm(span = 40, min_periods=0).mean()\n# initial portfolio, built on the out-of-sample prices and keeping the ema columns trade() expects\ntpfl = pd.DataFrame({'price':tpibm[10:], 'ema40':tema40[10:], 'pema40':tpema40[10:], 'shares':np.zeros(tpibm.size-10), 'cash':np.ones(tpibm.size-10)*10000})\n\ntpdiff = pd.DataFrame(index = tpibm.index[10:], data = {'price':tpibm.values[10:], 'pred':tppred})\nplot_data(tpdiff)\n\ntpfl = trade(tpfl)\ntpv = tpfl['price']*tpfl['shares'] + tpfl['cash']\ntpspy = tprices['SPY'][tpfl.index]\ntpfl_vs_tspy = pd.DataFrame(index = tpfl.index, data = {'my_portval':tpv/tpv.ix[0], 'SPY':tpspy/tpspy.ix[0]})\nplot_data(tpfl_vs_tspy, title = \"My_Portfolio vs SPY\", ylabel = \"Accumulative Return\")\n\n\n# For report\n\n\n", "sub_path": "p4/trade.py", "file_name": "trade.py", "file_ext": "py", "file_size_in_byte": 5577, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "datetime.datetime", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 13, "usage_type": "call"}, {"api_name": "pandas.date_range", "line_number": 15, "usage_type": "call"}, {"api_name": "util.get_data", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 34, "usage_type": "call"}, {"api_name": "KNNLearner.KNNLearner", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 77, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 94, "usage_type": "call"}, {"api_name": "util.plot_data", "line_number": 95, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 97, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 99, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 101, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 108, "usage_type": "call"}, {"api_name": "util.plot_data", "line_number": 109, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 113, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 114, "usage_type": "call"}, {"api_name": "pandas.date_range", "line_number": 116, "usage_type": "call"}, {"api_name": "util.get_data", "line_number": 117, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 124, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 127, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 129, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 131, "usage_type": "call"}, {"api_name": "util.plot_data", "line_number": 132, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 134, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 139, "usage_type": "call"}, {"api_name": "util.plot_data", "line_number": 140, "usage_type": 
"call"}]} +{"seq_id": "337346904", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport json\nfrom atm.core import auth,main\n\nuser_data={'account_id':None,\n 'authentication':False,\n 'account_data':None}\nprint('登录')\nauth.login(user_data)\nwith open('product') as data:\n product_data = json.load(data)\n\nwhile True:\n for name,values in product_data.items():\n print(name,values)\n choose=input('请输入要购买的物品或输入q退出')\n if choose=='q':\n break\n main.handle(user_data, 'consume',product_data[choose])\n", "sub_path": "shopping/Shopping.py", "file_name": "Shopping.py", "file_ext": "py", "file_size_in_byte": 530, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "atm.core.auth.login", "line_number": 10, "usage_type": "call"}, {"api_name": "atm.core.auth", "line_number": 10, "usage_type": "name"}, {"api_name": "json.load", "line_number": 12, "usage_type": "call"}, {"api_name": "atm.core.main.handle", "line_number": 20, "usage_type": "call"}, {"api_name": "atm.core.main", "line_number": 20, "usage_type": "name"}]} +{"seq_id": "33123459", "text": "import os\nimport errno\nimport yaml\nimport zipfile\nfrom etl.payload_data import PayloadData\nfrom io import BytesIO\nfrom posixpath import basename\nfrom urllib.parse import urlparse\n\n\ndef get_yaml(yaml_file):\n '''\n Returns a YAML object\n '''\n with open(os.path.join(yaml_file), 'r') as yamlReader:\n return yaml.load(yamlReader, Loader=yaml.FullLoader)\n\n\ndef get_file_name_from_uri(uri):\n parsed_path = urlparse(uri).path\n return basename(parsed_path)\n\n\ndef get_file_ext(path):\n return os.path.splitext(path)[1]\n\n\ndef decompress(archive_binary_data, supported_file_extensions=None):\n if not zipfile.is_zipfile(archive_binary_data):\n print('warning: passed argument is not an archive.')\n return archive_binary_data\n\n archive = zipfile.ZipFile(archive_binary_data)\n files_to_extract = []\n decompressed_files = []\n\n if not supported_file_extensions:\n for name in archive.namelist():\n decompressed_files.append(PayloadData(\n name, BytesIO(archive.read(name))))\n return decompressed_files\n\n for compressed_filename in archive.namelist():\n compressed_file_extension = get_file_ext(compressed_filename)\n if compressed_file_extension in supported_file_extensions:\n files_to_extract.append(compressed_filename)\n\n for name in files_to_extract:\n decompressed_files.append(PayloadData(\n name, BytesIO(archive.read(name))))\n return decompressed_files\n\n\n# remove a file, suppress error if file removal fails\ndef silentremove(filename):\n try:\n os.remove(filename)\n except OSError as e: # this would be \"except OSError, e:\" before Python 2.6\n if e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory\n raise # re-raise exception if a different error occurred\n\n# Convert NoneType to string\ndef xstr(s):\n if s is None:\n return ''\n return str(s)\n", "sub_path": "etl/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1929, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "yaml.load", "line_number": 16, "usage_type": "call"}, {"api_name": "yaml.FullLoader", "line_number": 16, "usage_type": "attribute"}, {"api_name": "urllib.parse.urlparse", "line_number": 20, "usage_type": "call"}, {"api_name": 
"posixpath.basename", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "zipfile.is_zipfile", "line_number": 29, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 33, "usage_type": "call"}, {"api_name": "etl.payload_data.PayloadData", "line_number": 39, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 40, "usage_type": "call"}, {"api_name": "etl.payload_data.PayloadData", "line_number": 49, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 50, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 57, "usage_type": "call"}, {"api_name": "errno.ENOENT", "line_number": 59, "usage_type": "attribute"}]} +{"seq_id": "26385218", "text": "import torch\r\nfrom torchvision import transforms\r\nfrom torchvision import datasets\r\nfrom torch.utils.data import DataLoader\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nimport matplotlib.pyplot as plt\r\n\r\n# prepare dataset\r\n\r\nbatch_size = 64\r\ntransform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\r\n\r\ntrain_dataset = datasets.MNIST(root='../dataset/mnist/', train=True, download=True, transform=transform)\r\ntrain_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)\r\ntest_dataset = datasets.MNIST(root='../dataset/mnist/', train=False, download=True, transform=transform)\r\n\r\ntest_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)\r\n\r\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\ninput = [\r\n 3,4,6,5,7,\r\n 2,4,6,8,2,\r\n 1,6,7,8,4,\r\n 9,7,4,6,2,\r\n 3,7,5,4,1\r\n]\r\ninput = torch.Tensor(input).view(1,1,5,5)\r\nconv_layer = torch.nn.Conv2d(1,1,kernel_size=3,stride=2,padding=1,bias=False)\r\nkernel = torch.Tensor([1,2,3,4,5,6,7,8,9]).view(1,1,3,3)\r\n\r\nconv_layer.weight.data = kernel.data\r\n\r\noutput = conv_layer(input)\r\nprint(output)\r\n\r\n\r\nclass Net(torch.nn.Module):\r\n def __init__(self):\r\n super(Net, self).__init__()\r\n self.conv1 = torch.nn.Conv2d(1,10,kernel_size=5)\r\n self.conv2 = torch.nn.Conv2d(10,20,kernel_size=5)\r\n self.pooling = torch.nn.MaxPool2d(2)\r\n self.fc = torch.nn.Linear(320,10)\r\n def forward(self, x):\r\n batch_size = x.size(0)\r\n x = F.relu(self.pooling(self.conv1(x)))\r\n x = F.relu(self.pooling(self.conv2(x)))\r\n x = x.view(batch_size , - 1)\r\n x = self.fc(x)\r\n return x\r\nmodel = Net()\r\nmodel.to(device)\r\n\r\ncriterion = torch.nn.CrossEntropyLoss()\r\noptimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)\r\n\r\ndef train(epoch):\r\n running_loss = 0.0\r\n for batch_idx,data in enumerate(train_loader,0):\r\n input,target = data\r\n inputs,target = input.to(device),target.to(device)\r\n optimizer.zero_grad()\r\n\r\n outputs=model(inputs)\r\n loss = criterion(outputs,target)\r\n loss.backward()\r\n optimizer.step()\r\n\r\n\r\n running_loss += loss.item()\r\n if batch_idx % 300 == 299:\r\n print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))\r\n running_loss = 0.0\r\n\r\n\r\ndef test():\r\n correct = 0\r\n total = 0\r\n with torch.no_grad():\r\n for data in test_loader:\r\n images, labels = data\r\n images, labels = images.to(device), labels.to(device)\r\n outputs = model(images)\r\n _, predicted = torch.max(outputs.data, dim=1)\r\n total += labels.size(0)\r\n correct += (predicted == 
    print('accuracy on test set: %d %% ' % (100 * correct / total))\r\n    return correct / total\r\n\r\n\r\nif __name__ == '__main__':\r\n    epoch_list = []\r\n    acc_list = []\r\n\r\n    for epoch in range(10):\r\n        train(epoch)\r\n        acc = test()\r\n        epoch_list.append(epoch)\r\n        acc_list.append(acc)\r\n\r\n    plt.plot(epoch_list, acc_list)\r\n    plt.ylabel('accuracy')\r\n    plt.xlabel('epoch')\r\n    plt.show()\r\n", "sub_path": "chart_10_1.py", "file_name": "chart_10_1.py", "file_ext": "py", "file_size_in_byte": 3214, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "torchvision.transforms.Compose", "line_number": 12, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 12, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 12, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 12, "usage_type": "call"}, {"api_name": "torchvision.datasets.MNIST", "line_number": 14, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 15, "usage_type": "call"}, {"api_name": "torchvision.datasets.MNIST", "line_number": 16, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 20, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn.Conv2d", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 38, "usage_type": "attribute"}, {"api_name": "torch.nn.Conv2d", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 41, "usage_type": "attribute"}, {"api_name": "torch.nn.Conv2d", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "attribute"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 44, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.relu", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 47, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 55, "usage_type": "attribute"}, {"api_name": "torch.optim.SGD", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 56, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 
102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}]} +{"seq_id": "399878503", "text": "import numpy\nimport matplotlib.pyplot as plt\n\n#Create the arrays that represents the values of the x and y axis:\n\nx = [1,2,3,5,6,7,8,9,10,12,13,14,15,16,18,19,21,22]\ny = [100,90,80,60,60,55,60,65,70,70,75,76,78,79,90,99,99,100]\n\n#NumPy has a method that lets us make a polynomial model:\n\nmymodel = numpy.poly1d(numpy.polyfit(x, y, 3))\n\n#Then specify how the line will display, we start at position 1, and end at position 22:\n\nmyline = numpy.linspace(1, 22, 100)\n\n#Draw the original scatter plot:\n\nplt.scatter(x, y)\n\n#Draw the line of polynomial regression:\n\nplt.plot(myline, mymodel(myline))\n\n#Display the diagram:\n\nplt.show()", "sub_path": "dictionary.py", "file_name": "dictionary.py", "file_ext": "py", "file_size_in_byte": 626, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "numpy.poly1d", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "581531114", "text": "#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nAn example for computing correlation matrix.\nRun with:\n bin/spark-submit examples/src/main/python/ml/correlation_example.py\n\"\"\"\nfrom __future__ import print_function\n\n# $example on$\nfrom pyspark.ml.linalg import Vectors\nfrom pyspark.ml.stat import Correlation\n# $example off$\nfrom pyspark.sql import SparkSession\nimport time\nimport sys\n\n\npartition = float(sys.argv[1])\nnum_parts = int(sys.argv[2])\ninput_file = sys.argv[3]\n\nif __name__ == \"__main__\":\n spark = SparkSession \\\n .builder \\\n .appName(\"CorrelationExample\") \\\n .getOrCreate()\n\n # $example on$\n df = spark.read.format(\"libsvm\").load(input_file)\n\n df = df.sample(False, partition).coalesce(num_parts)\n\n start = time.time()\n r1 = Correlation.corr(df, \"features\").head()\n end = time.time()\n run_time = end - start\n print(\"time: \" + str(run_time))\n print(\"Pearson correlation matrix:\\n\" + str(r1[0]))\n\n\n # $example off$\n\n spark.stop()\n", "sub_path": "spark-mllib/pearson.py", "file_name": "pearson.py", "file_ext": "py", "file_size_in_byte": 1738, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "sys.argv", "line_number": 34, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 35, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession.builder.appName", "line_number": 39, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 39, "usage_type": "name"}, {"api_name": "time.time", "line_number": 49, "usage_type": "call"}, {"api_name": "pyspark.ml.stat.Correlation.corr", "line_number": 50, "usage_type": "call"}, {"api_name": "pyspark.ml.stat.Correlation", "line_number": 50, "usage_type": "name"}, {"api_name": "time.time", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "473185740", "text": "\"\"\"Platform for Garmin Connect integration.\"\"\"\nfrom __future__ import annotations\n\nimport logging\n\nfrom homeassistant.components.sensor import SensorEntity\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.const import ATTR_ATTRIBUTION, CONF_ID, DEVICE_CLASS_TIMESTAMP\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.entity import DeviceInfo\nfrom homeassistant.helpers.update_coordinator import (\n CoordinatorEntity,\n DataUpdateCoordinator,\n)\n\nfrom .alarm_util import calculate_next_active_alarms\nfrom .const import (\n ATTRIBUTION,\n DATA_COORDINATOR,\n DOMAIN as GARMIN_DOMAIN,\n GARMIN_ENTITY_LIST,\n)\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup_entry(\n hass: HomeAssistant, entry: ConfigEntry, async_add_entities\n) -> None:\n \"\"\"Set up Garmin Connect sensor based on a config entry.\"\"\"\n coordinator: DataUpdateCoordinator = hass.data[GARMIN_DOMAIN][entry.entry_id][\n DATA_COORDINATOR\n ]\n unique_id = entry.data[CONF_ID]\n\n entities = []\n for (\n sensor_type,\n (name, unit, icon, device_class, 
enabled_by_default),\n ) in GARMIN_ENTITY_LIST.items():\n\n _LOGGER.debug(\n \"Registering entity: %s, %s, %s, %s, %s, %s\",\n sensor_type,\n name,\n unit,\n icon,\n device_class,\n enabled_by_default,\n )\n entities.append(\n GarminConnectSensor(\n coordinator,\n unique_id,\n sensor_type,\n name,\n unit,\n icon,\n device_class,\n enabled_by_default,\n )\n )\n\n async_add_entities(entities)\n\n\nclass GarminConnectSensor(CoordinatorEntity, SensorEntity):\n \"\"\"Representation of a Garmin Connect Sensor.\"\"\"\n\n def __init__(\n self,\n coordinator,\n unique_id,\n sensor_type,\n name,\n unit,\n icon,\n device_class,\n enabled_default: bool = True,\n ):\n \"\"\"Initialize a Garmin Connect sensor.\"\"\"\n super().__init__(coordinator)\n\n self._unique_id = unique_id\n self._type = sensor_type\n self._name = name\n self._unit = unit\n self._icon = icon\n self._device_class = device_class\n self._enabled_default = enabled_default\n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return self._name\n\n @property\n def icon(self):\n \"\"\"Return the icon to use in the frontend.\"\"\"\n return self._icon\n\n @property\n def state(self):\n \"\"\"Return the state of the sensor.\"\"\"\n if not self.coordinator.data or not self.coordinator.data[self._type]:\n return None\n\n value = self.coordinator.data[self._type]\n if \"Duration\" in self._type or \"Seconds\" in self._type:\n value = value // 60\n elif \"Mass\" in self._type or self._type == \"weight\":\n value = value / 1000\n elif self._type == \"nextAlarm\":\n active_alarms = calculate_next_active_alarms(\n self.coordinator.data[self._type]\n )\n if active_alarms:\n value = active_alarms[0]\n else:\n value = None\n\n if self._device_class == DEVICE_CLASS_TIMESTAMP:\n return value\n\n return round(value, 2)\n\n @property\n def unique_id(self) -> str:\n \"\"\"Return the unique ID for this sensor.\"\"\"\n return f\"{self._unique_id}_{self._type}\"\n\n @property\n def unit_of_measurement(self):\n \"\"\"Return the unit of measurement.\"\"\"\n return self._unit\n\n @property\n def extra_state_attributes(self):\n \"\"\"Return attributes for sensor.\"\"\"\n if not self.coordinator.data:\n return {}\n\n attributes = {\n \"source\": self.coordinator.data[\"source\"],\n \"last_synced\": self.coordinator.data[\"lastSyncTimestampGMT\"],\n ATTR_ATTRIBUTION: ATTRIBUTION,\n }\n if self._type == \"nextAlarm\":\n attributes[\"next_alarms\"] = calculate_next_active_alarms(\n self.coordinator.data[self._type]\n )\n\n return attributes\n\n @property\n def device_info(self) -> DeviceInfo:\n \"\"\"Return device information.\"\"\"\n return {\n \"identifiers\": {(GARMIN_DOMAIN, self._unique_id)},\n \"name\": \"Garmin Connect\",\n \"manufacturer\": \"Garmin Connect\",\n }\n\n @property\n def entity_registry_enabled_default(self) -> bool:\n \"\"\"Return if the entity should be enabled when first added to the entity registry.\"\"\"\n return self._enabled_default\n\n @property\n def available(self) -> bool:\n \"\"\"Return True if entity is available.\"\"\"\n return (\n super().available\n and self.coordinator.data\n and self._type in self.coordinator.data\n )\n\n @property\n def device_class(self):\n \"\"\"Return the device class of the sensor.\"\"\"\n return self._device_class\n", "sub_path": "custom_components/garmin_connect/sensor.py", "file_name": "sensor.py", "file_ext": "py", "file_size_in_byte": 5099, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": 
"logging.getLogger", "line_number": 24, "usage_type": "call"}, {"api_name": "homeassistant.core.HomeAssistant", "line_number": 28, "usage_type": "name"}, {"api_name": "homeassistant.config_entries.ConfigEntry", "line_number": 28, "usage_type": "name"}, {"api_name": "homeassistant.helpers.update_coordinator.DataUpdateCoordinator", "line_number": 31, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 31, "usage_type": "name"}, {"api_name": "const.DATA_COORDINATOR", "line_number": 32, "usage_type": "name"}, {"api_name": "homeassistant.const.CONF_ID", "line_number": 34, "usage_type": "name"}, {"api_name": "const.GARMIN_ENTITY_LIST.items", "line_number": 40, "usage_type": "call"}, {"api_name": "const.GARMIN_ENTITY_LIST", "line_number": 40, "usage_type": "name"}, {"api_name": "homeassistant.helpers.update_coordinator.CoordinatorEntity", "line_number": 67, "usage_type": "name"}, {"api_name": "homeassistant.components.sensor.SensorEntity", "line_number": 67, "usage_type": "name"}, {"api_name": "alarm_util.calculate_next_active_alarms", "line_number": 114, "usage_type": "call"}, {"api_name": "homeassistant.const.DEVICE_CLASS_TIMESTAMP", "line_number": 122, "usage_type": "name"}, {"api_name": "homeassistant.const.ATTR_ATTRIBUTION", "line_number": 146, "usage_type": "name"}, {"api_name": "const.ATTRIBUTION", "line_number": 146, "usage_type": "name"}, {"api_name": "alarm_util.calculate_next_active_alarms", "line_number": 149, "usage_type": "call"}, {"api_name": "const.DOMAIN", "line_number": 159, "usage_type": "name"}, {"api_name": "homeassistant.helpers.entity.DeviceInfo", "line_number": 156, "usage_type": "name"}]} +{"seq_id": "432226178", "text": "from django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.http import JsonResponse\nfrom django.shortcuts import redirect, render\nfrom django.urls import reverse\nfrom django.views.generic import TemplateView\nfrom mercurysms import sheets\nfrom mercurysms import twilio\nfrom mercurysms import worker\nfrom mercurysms.forms import SendSMSForm\n\nSHEETS_KEY = getattr(settings, \"SHEETS_KEY\", None)\nSHEETS_GID = getattr(settings, \"SHEETS_GID\", None)\n\nbg_worker = worker.Worker()\n\nclass SendSMSView(LoginRequiredMixin, TemplateView):\n template_name = 'sendSMS.html'\n\n def __init__(self):\n super(SendSMSView, self).__init__()\n self.sheet = sheets.Sheet(SHEETS_KEY, SHEETS_GID)\n bg_worker = worker.Worker()\n\n def get_context_data(self, **kwargs):\n con = super(SendSMSView, self).get_context_data(**kwargs)\n lists = self.sheet.lists\n con.update({'form': SendSMSForm(lists), 'lists': lists,\n 'sheets_url': sheets.SHEETS_URL.format(key=SHEETS_KEY, gid=SHEETS_GID)})\n return con\n\n def post(self, request, *args, **kwargs):\n lists = request.POST.getlist('lists')\n numbers = set()\n for list_ in lists:\n numbers = numbers.union(set(self.sheet.get_list(list_)))\n message = request.POST.get('message')\n bg_worker.start_process(message, list(numbers))\n return redirect('sending')\n\n\n@login_required\ndef succesfully_sent(request):\n nums = bg_worker.err_nums\n cost = bg_worker.cost\n bg_worker.reset()\n context = {'nums': nums, 'cost': cost}\n return render(request, 'sms_sent.html', context=context)\n\n\n@login_required\ndef sending(request):\n return render(request, 'sending.html')\n\n\ndef status(request):\n data = { 'finished': bg_worker.done }\n return JsonResponse(data)\n", "sub_path": "mercurysms/views.py", "file_name": "views.py", 
"file_ext": "py", "file_size_in_byte": 1903, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "django.conf.settings", "line_number": 13, "usage_type": "argument"}, {"api_name": "django.conf.settings", "line_number": 14, "usage_type": "argument"}, {"api_name": "mercurysms.worker.Worker", "line_number": 16, "usage_type": "call"}, {"api_name": "mercurysms.worker", "line_number": 16, "usage_type": "name"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 18, "usage_type": "name"}, {"api_name": "django.views.generic.TemplateView", "line_number": 18, "usage_type": "name"}, {"api_name": "mercurysms.sheets.Sheet", "line_number": 23, "usage_type": "call"}, {"api_name": "mercurysms.sheets", "line_number": 23, "usage_type": "name"}, {"api_name": "mercurysms.worker.Worker", "line_number": 24, "usage_type": "call"}, {"api_name": "mercurysms.worker", "line_number": 24, "usage_type": "name"}, {"api_name": "mercurysms.forms.SendSMSForm", "line_number": 29, "usage_type": "call"}, {"api_name": "mercurysms.sheets.SHEETS_URL.format", "line_number": 30, "usage_type": "call"}, {"api_name": "mercurysms.sheets.SHEETS_URL", "line_number": 30, "usage_type": "attribute"}, {"api_name": "mercurysms.sheets", "line_number": 30, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 40, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 49, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 43, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 54, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 52, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "428549380", "text": "from PyQt5 import QtWidgets, uic, QtCore\nfrom PyQt5.QtMultimedia import QMediaPlayer, QMediaContent, QVideoFrame\nfrom PyQt5.QtCore import QUrl, QTimer\nfrom PyQt5.QtGui import QPixmap, QImage\nimport sys\nimport os\nimport threading\napp = QtWidgets.QApplication(sys.argv)\nimport cv2\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom time import sleep\n\n\nclass UI(QtWidgets.QMainWindow):\n\t# changePixmap = QtCore.pyqtSignal()\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.ui = uic.loadUi('/workspace/demo_cv2_load_entire.ui', self)\n\t\tself.ui.button_loadvideo.clicked.connect(self.get_videopath)\n\t\tself.ui.button_playpause.clicked.connect(self.playpause)\n\t\tself.ui.button_playpause.setEnabled(False)\n\t\tself.lineEdit.returnPressed.connect(self.set_videopath)\n\t\t# self.slider_video.setRange(0, 0)\n\t\t# self.slider_video.sliderMoved.connect(self.set_video_position)\n\n\t\t# self.player = QMediaPlayer(None, QMediaPlayer.VideoSurface)\n\t\t# self.player.setVideoOutput(self.videowidget)\n\t\t# self.player.positionChanged.connect(self.set_slider_position)\n\t\t# self.player.durationChanged.connect(self.set_slider_duration)\n\n\t\tself.slider_cv2.setRange(0, 0)\n\t\tself.slider_cv2.setValue(0)\n\t\tself.slider_cv2.valueChanged.connect(self.read_videoframe)\n\t\tself.slider_cv2.sliderReleased.connect(self.read_videoframe)\n\t\tself.slider_cv2.sliderPressed.connect(self.stop_timer)\n\t\tself.timer = QTimer(self)\n\t\tself.timer.timeout.connect(self.set_slider_position)\n\n\t\tself.fig = plt.Figure()\n\t\tself.canvas = 
FigureCanvas(self.fig)\n\t\tself.graph_layout.addWidget(self.canvas)\n\n\t\t# self.changePixmap = QtCore.pyqtSignal(QImage)\n\t\t# self.changePixmap.connect(self.show_videoframe)\n\n\t\t#-------------\n\t\tself.video_format = ('mp4', 'avi', 'mpeg')\n\t\tself.videopath = ''\n\t\tself.frames = []\n\t\tself.isplaying = False\n\t\tself.position = 0\n\t\tself.cap = None\n\t\tself.max_frame = None\n\t\tself.values = []\n\t\t#-------------\n\n\t\tself.ui.show()\n\t\n\tdef set_videopath(self):\n\t\tself.videopath = self.lineEdit.text()\n\t\t# print(self.videopath)\n\t\tif (self.videopath != '') \\\n\t\tand os.path.isfile(self.videopath) \\\n\t\tand self.videopath.split('.')[-1] in self.video_format:\n\t\t\t# self.player.setMedia(QMediaContent(QUrl.fromLocalFile(self.videopath)))\n\t\t\t# self.read_videoframe()\n\t\t\tself.cap = cv2.VideoCapture(self.videopath)\n\t\t\tself.max_frame = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\t\t\tself.slider_cv2.setRange(0, self.max_frame)\n\t\t\tself.values = [0 for _ in range(self.max_frame)]\n\t\t\tself.ax = self.fig.add_subplot(111)\n\t\t\tself.ax.plot(self.values)\n\t\t\tself.ax.set_xlabel(\"frame\")\n\t\t\tself.ax.set_ylabel(\"confidence\")\n\t\t\tself.ax.set_title(\"TEMP graph\")\n\t\t\tself.ax.set_ylim([0., 1.])\n\t\t\tself.ax.legend()\n\t\t\tself.read_videoframe()\n\t\t\tself.ui.button_playpause.setEnabled(True)\n\t\n\tdef get_videopath(self):\n\t\tfilter = \"Videos(*.mp4 *.avi *.mpeg)\"\n\t\tself.videopath = QtWidgets.QFileDialog.getOpenFileName(self, filter=filter)[0]\n\t\t# print(self.videopath)\n\t\tif self.videopath != '':\n\t\t\t# self.player.setMedia(QMediaContent(QUrl.fromLocalFile(self.videopath)))\n\t\t\t# self.read_videoframe()\n\t\t\tself.lineEdit.setText(self.videopath)\n\t\t\tself.cap = cv2.VideoCapture(self.videopath)\n\t\t\tself.max_frame = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\t\t\tself.slider_cv2.setRange(0, self.max_frame)\t\t\t\n\t\t\tself.values = [0 for _ in range(self.max_frame)]\n\t\t\tself.ax = self.fig.add_subplot(111)\n\t\t\tself.ax.plot(self.values)\n\t\t\tself.ax.set_xlabel(\"frame\")\n\t\t\tself.ax.set_ylabel(\"confidence\")\n\t\t\tself.ax.set_title(\"TEMP graph\")\n\t\t\tself.ax.set_ylim([0., 1.])\n\t\t\tself.ax.legend()\n\t\t\tself.read_videoframe()\n\t\t\tself.ui.button_playpause.setEnabled(True)\n\t\t\t# self.changePixmap.emit()\n\n\tdef playpause(self):\n\t\t# if self.player.state() == QMediaPlayer.PlayingState:\n\t\t# \tself.player.pause()\n\t\t# \t# pass\n\t\t# else:\n\t\t# \tself.player.play()\n\t\tif not self.isplaying:\n\t\t\tself.timer.start(500)\n\t\t\tself.isplaying = True\n\t\t\t# print(\"BTN clicked\")\n\t\telse:\n\t\t\tself.timer.stop()\n\t\t\tself.isplaying = False\n\t\t\t# self.changePixmap.emit()\n\t\t\t# self.read_videoframe()\n\t\n\t# def set_video_position(self, position):\n\t# \tself.player.setPosition(position)\n\t\n\tdef set_slider_position(self):\n\t\t# self.slider_video.setValue(position)\n\t\tself.position += 1\n\t\tself.slider_cv2.setValue(self.position)\n\n\t# def set_slider_duration(self, duration):\n\t# \tself.slider_video.setRange(0, duration)\n\t\n\tdef read_videoframe(self):\n\t\t# cap = cv2.VideoCapture(self.videopath)\n\t\t# while cap.isOpened():\n\t\t# \tret, frame = cap.read()\n\t\t# \tif ret:\n\t\t# \t\tself.frames.append(frame)\n\t\t# \t\tprint(\"Loading frame: \", len(self.frames))\n\t\t# \telse:\n\t\t# \t\tbreak\n\t\t# \t# cv2.waitKey(30)\n\t\t# \t# sleep(1)\n\t\t# print(\"End loading\")\n\t\tself.position = self.slider_cv2.value()\n\t\tself.cap.set(1, 
self.slider_cv2.value())\n\t\tret, frame = self.cap.read()\n\t\tif ret:\n\t\t\tself.show_videoframe(frame)\n\t\telse:\n\t\t\tprint(\"Nothing to show\")\n\t\t# cap.release()\n\t\t# cv2.destroyAllWindows()\n\t\tself.process_frames_temp(frame)\n\t\n\tdef show_videoframe(self, frame):\n\t\t# print(self.slider_cv2.value())\n\t\t# frame = self.frames[self.slider_cv2.value()]\n\t\tframe = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\t\th, w, ch = frame.shape\n\t\tframe = QImage(frame.data, w, h, ch * w, QImage.Format_RGB888)\n\t\tframe = QPixmap(frame)\n\t\tframe = frame.scaled(320, 270, QtCore.Qt.KeepAspectRatio)\n\t\tself.label.setPixmap(frame)\n\t\t# self.label.update()\n\t\n\t# def show_videoframe_stop_timer(self):\n\t# \tself.timer.stop()\n\t# \tframe = self.frames[self.slider_cv2.value()]\n\t# \tself.position = self.slider_cv2.value()\n\t# \tframe = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\t# \th, w, ch = frame.shape\n\t# \tframe = QImage(frame.data, w, h, ch * w, QImage.Format_RGB888)\n\t# \tframe = QPixmap(frame)\n\t# \tframe = frame.scaled(320, 270, QtCore.Qt.KeepAspectRatio)\n\t# \tself.label.setPixmap(frame)\n\t# \tself.timer.start(500)\n\n\tdef stop_timer(self):\n\t\tself.timer.stop()\n\t\tself.isplaying = False\n\t\n\tdef process_frames_temp(self, frame):\n\t\t# values = [0.8 for _ in range(len(self.frames))]\n\t\t# print(values)\n\t\tself.values[self.slider_cv2.value()] = 0.8\n\t\tself.ax.plot(self.values)\n\t\tself.canvas.draw()\n\n\nif __name__ == \"__main__\":\n\t# import cv2\n\t# app = QtWidgets.QApplication(sys.argv)\n\twindow = UI()\n\tapp.exec_()\n", "sub_path": "demo_load_lazy.py", "file_name": "demo_load_lazy.py", "file_ext": "py", "file_size_in_byte": 6159, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 8, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 8, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 8, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 15, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 15, "usage_type": "name"}, {"api_name": "PyQt5.uic.loadUi", "line_number": 19, "usage_type": "call"}, {"api_name": "PyQt5.uic", "line_number": 19, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QTimer", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.Figure", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 68, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FRAME_COUNT", "line_number": 69, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.getOpenFileName", "line_number": 84, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 84, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 84, "usage_type": "name"}, {"api_name": "cv2.VideoCapture", "line_number": 90, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FRAME_COUNT", "line_number": 91, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 158, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 
158, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui.QImage", "line_number": 160, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QImage.Format_RGB888", "line_number": 160, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 161, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 162, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 162, "usage_type": "name"}]} +{"seq_id": "639243463", "text": "import pickle\nimport requests\nimport socket, os\n\nclass Registration(object):\n\n def __init__(self):\n self.hash = ''\n self.code = ''\n self.qr = ''\n\n def register(self, port):\n\n ipaddr = \"\"\n try:\n gw = os.popen(\"ip -4 route show default\").read().split()\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((gw[2], 0))\n ipaddr = s.getsockname()[0]\n except IndexError:\n ipaddr = socket.gethostbyname(socket.gethostname())\n\n #url = 'http://localhost:5000/register/%s/%d' % (ipaddr, port)\n url = 'http://130.240.5.87:5000/register/%s/%d' % (ipaddr, port)\n r = requests.post(url)\n status_code = r.status_code\n if status_code != 200:\n print('Something went wrong, got ' + str(status_code))\n return -1\n else:\n response = pickle.loads(r.content) # unbox the response content\n self.hash = response['hash'] # not really needed\n self.code = response['code'] # should be displayed\n self.qr = response['qr']\n\n self.saveQR()\n return\n\n def saveQR(self):\n img = self.qr.make_image()\n img.save('static/media/qr.png') # can also use .jpeg, .bmp\n", "sub_path": "screen/registration.py", "file_name": "registration.py", "file_ext": "py", "file_size_in_byte": 1287, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "os.popen", "line_number": 16, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 17, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 17, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 17, "usage_type": "attribute"}, {"api_name": "socket.gethostbyname", "line_number": 21, "usage_type": "call"}, {"api_name": "socket.gethostname", "line_number": 21, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 25, "usage_type": "call"}, {"api_name": "pickle.loads", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "20306720", "text": "\"\"\"\nThis module provides the Plant and Machine classes.\nCraneMoveTime is the time a crane takes from one Machine to another in the \nPlant.\n\"\"\"\nfrom xml.dom import minidom\nfrom extra import *\n\nCraneMoveTime = 1\n\nclass Machine(object):\n\t\"\"\"\n\tProvides the implementation of a Machine in a Plant.\n\t\"\"\"\n\tdef __init__(self, name, quantity = 1, minDelay = 0, canUnhook = False):\n\t\t\"\"\"\n\t\tname is the unique Machine name.\n\t\tminDelay is the minimum (constant) delay a Machine introduces for any \n\t\tOrder.\n\t\tcanUnhook is whether a crane can leave an Order at this Machine or not.\n\t\tquantity is the number of available machines of this type (name) in \n\t\tthe Plant.\n\t\t\"\"\"\n\t\tobject.__init__(self)\n\n\t\tassert name != None\n\t\tassert name != \"\"\n\t\tassert quantity >= 1\n\t\tassert minDelay >= 0\n\n\t\tself.quantity = quantity\n\t\tself.minDelay = minDelay\n\t\tself.canUnhook = canUnhook\n\t\tself.name = name\n\n\tdef toXml(self, xmlDoc):\n\t\t\"\"\"\n\t\tExports the Machine instance to an XML tree node and returns the node\n\t\tinstance. 
xmlDoc is used to create the XML tree node element.\n\t\t\"\"\"\n\t\tnode = xmlDoc.createElement(\"machine\")\n\t\tnode.setAttribute(\"name\", self.name)\n\t\tnode.setAttribute(\"quantity\", str(self.quantity))\n\t\tnode.setAttribute(\"minDelay\", str(self.minDelay))\n\t\tnode.setAttribute(\"canUnhook\", str(self.canUnhook))\n\t\treturn node\n\n\t@staticmethod\n\tdef fromXml(element):\n\t\t\"\"\"\n\t\tCreates a Machine instance from an XML node tree element and returns it.\n\t\t\"\"\"\n\t\treturn Machine(\n\t\t\tname = element.getAttribute(\"name\"),\n\t\t\tquantity = int(element.getAttribute(\"quantity\")),\n\t\t\tminDelay = int(element.getAttribute(\"minDelay\")),\n\t\t\tcanUnhook = strToBool(element.getAttribute(\"canUnhook\"))\n\t\t)\n\nclass Plant(object):\n\t\"\"\"\n\tProvides the implementation of a Plant (factory) with a list of Machine \n\tinstances.\n\t\"\"\"\n\tdef __init__(self):\n\t\t\"\"\"\n\t\tmachines is a list of ordered Machine instances (by sequence in Plant).\n\t\tminProcTime is the minimum (constant) processing time for any Order \n\t\tgoing through the Plant. This is the summation of all the times between\n\t\tevery two Machine instances in the Plant.\n\t\t\"\"\"\n\t\tobject.__init__(self)\n\t\tself.machines = []\n\n\tdef toXml(self):\n\t\t\"\"\"\n\t\tCreates an XML tree node from the Plant instance and returns it.\n\t\t\"\"\"\n\t\tdomImp = minidom.getDOMImplementation()\n\t\txmlDoc = domImp.createDocument(None, \"plant\", None)\n\n\t\tfor m in self.machines:\n\t\t\txmlDoc.documentElement.appendChild(m.toXml(xmlDoc))\n\n\t\treturn xmlDoc.documentElement\n\n\tdef toXmlFile(self, filename):\n\t\t\"\"\"\n\t\tSaves the Plant instance to an XML file.\n\t\t\"\"\"\n\t\tfile = open(filename, \"w\")\n\t\tfile.write(self.toXml().toprettyxml())\n\t\tfile.close()\n\n\t@staticmethod\n\tdef fromXml(xmlDoc):\n\t\t\"\"\"\n\t\tA static method that loads a Plant instance (and returns it) from \n\t\tan XML document. xmlDoc is the document instance.\n\t\t\"\"\"\n\t\tplant = Plant()\n\t\tfor e in xmlDoc.getElementsByTagName(\"machine\"):\n\t\t\tplant.addMachine(Machine.fromXml(e))\n\t\treturn plant\n\n\t@staticmethod\n\tdef fromXmlFile(filename):\n\t\t\"\"\"\n\t\tA static method that loads a Plant instance (and returns it) from \n\t\tan XML file (str filename).\n\t\t\"\"\"\n\t\tfile = open(filename, \"r\")\n\t\tdoc = minidom.parse(file)\n\t\tplant = Plant.fromXml(doc)\n\t\tfile.close()\n\t\treturn plant\n\n\tdef addMachine(self, machine):\n\t\t\"\"\"\n\t\tAdd a Machine instance to the Plant. 
If the Machine instance or its\n\t\tname is already in the list of machines, an Exception will be thrown.\n\t\tAfter adding a Machine instance, minProcTime is updated.\n\t\t\"\"\"\n\t\tassert machine not in self.machines\n\n\t\tfor m in self.machines:\n\t\t\tif m.name == machine.name:\n\t\t\t\traise Exception(\"Machine name already in plant\")\n\t\tself.machines.append(machine)\n", "sub_path": "Projects/PlantMaker/archive/20100420/plant.py", "file_name": "plant.py", "file_ext": "py", "file_size_in_byte": 3587, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "xml.dom.minidom.getDOMImplementation", "line_number": 79, "usage_type": "call"}, {"api_name": "xml.dom.minidom", "line_number": 79, "usage_type": "name"}, {"api_name": "xml.dom.minidom.parse", "line_number": 113, "usage_type": "call"}, {"api_name": "xml.dom.minidom", "line_number": 113, "usage_type": "name"}]}
+{"seq_id": "235435700", "text": "import cv2\nimport pafy\n\nmouth_cascade = cv2.CascadeClassifier('./data/haarcascade_mcs_mouth.xml')\n\nlip_mask = cv2.imread('./data/lip5.png')\nh_mask, w_mask = lip_mask.shape[:2]\n\nurl = 'https://www.youtube.com/watch?v=B0abXq6bff4'\nvideo = pafy.new(url)\nbest= video.getbest(preftype='webm')\ncap=cv2.VideoCapture(best.url)\nframe_size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),\n              int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n\nout = cv2.VideoWriter('lip5_mask_final.MP4', 0x7634706d, 25, frame_size, isColor=True)\n\nif mouth_cascade.empty():\n\traise IOError('Unable to load the mouth cascade classifier xml file')\n\nwhile True:\n    ret, frame = cap.read()\n\n    if not ret:\n        break\n    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n    mouth_rects = mouth_cascade.detectMultiScale(gray, 1.3,11)\n\n    for (x,y,w,h) in mouth_rects:\n        w = int(w*2.1)\n        h = int(h*0.7)\n        x = int(x - 0.10*w)\n        y = int(y - 0.15*h)\n        # cv2.rectangle(frame, (x,y), (x+w,y+h), (0,255,0), 3)\n\n        frame_roi = frame[y:y + h, x:x + w] # crop the region of the frame corresponding to the face\n        face_mask_small = cv2.resize(lip_mask, (w, h), interpolation=cv2.INTER_AREA)\n        # resize the mask to fit that region\n        gray_mask = cv2.cvtColor(face_mask_small, cv2.COLOR_BGR2GRAY)\n\n        ret, mask = cv2.threshold(gray_mask, 150, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY_INV)\n        mask_inv = cv2.bitwise_not(mask)\n        masked_face = cv2.bitwise_and(face_mask_small, face_mask_small, mask=mask)\n        masked_frame = cv2.bitwise_and(frame_roi, frame_roi, mask=mask_inv)\n        frame[y:y + h, x:x + w] = cv2.add(masked_face, masked_frame)\n\n        break\n\n    cv2.imshow('Mouth Detector', frame)\n    out.write(frame)\n\n    c = cv2.waitKey(1)\n    if c == 27:\n        break\n\ncap.release()\ncv2.destroyAllWindows()\n", "sub_path": "hw/4th/hw4_2.py", "file_name": "hw4_2.py", "file_ext": "py", "file_size_in_byte": 1830, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "cv2.CascadeClassifier", "line_number": 4, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 6, "usage_type": "call"}, {"api_name": "pafy.new", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FRAME_WIDTH", "line_number": 13, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 14, "usage_type": "attribute"}, {"api_name": "cv2.VideoWriter", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 26, 
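
The hw4_2.py record above composites a lip image onto each frame with the classic threshold / bitwise-mask / add idiom. Here is a minimal sketch of just that idiom, using synthetic arrays in place of the video frame and lip5.png; every value below is illustrative.

import numpy as np
import cv2

# Synthetic stand-ins: a gray "frame" ROI and a bright square "overlay".
roi = np.full((60, 60, 3), 128, dtype=np.uint8)
overlay = np.zeros((60, 60, 3), dtype=np.uint8)
overlay[15:45, 15:45] = (0, 0, 255)

gray = cv2.cvtColor(overlay, cv2.COLOR_BGR2GRAY)
_, mask = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask)

fg = cv2.bitwise_and(overlay, overlay, mask=mask)   # keep overlay pixels
bg = cv2.bitwise_and(roi, roi, mask=mask_inv)       # keep frame pixels elsewhere
combined = cv2.add(fg, bg)                          # composite the two halves
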
"usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.INTER_AREA", "line_number": 38, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 40, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.THRESH_OTSU", "line_number": 42, "usage_type": "attribute"}, {"api_name": "cv2.THRESH_BINARY_INV", "line_number": 42, "usage_type": "attribute"}, {"api_name": "cv2.bitwise_not", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 45, "usage_type": "call"}, {"api_name": "cv2.add", "line_number": 46, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 53, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "487800659", "text": "from __future__ import print_function\nimport argparse\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\nfrom apex.fp16_utils import to_python_float\nimport pprint\nimport logging\nimport time\n\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torch.distributed as dist\n\n# Training settings\nparser = argparse.ArgumentParser(description='PyTorch MNIST Example')\nparser.add_argument('--batch-size', type=int, default=4096, metavar='N',\n help='Matrix M')\n\nparser.add_argument('--input-size', type=int, default=2048, metavar='N',\n help='Matrix K')\n\nparser.add_argument('--hidden-size', type=int, default=2048, metavar='N',\n help='Matrix K')\n\nparser.add_argument('--output-size', type=int, default=2048, metavar='N',\n help='Matrix N')\n\nparser.add_argument('--bias', action='store_true', default=False,\n help='whether use debug apex')\n\nparser.add_argument('--logfile', type=str, default=None, help='logging output')\n\nparser.add_argument('--hidden-layers', type=int, default=4, metavar='N',\n help='input batch size for training (default: 64)')\n\nparser.add_argument('--bucket-size', type=int, default=10000000, metavar='N',\n help='input batch size for training (default: 64)')\n\nparser.add_argument('--layers-per-bucket', type=int, default=0, metavar='N',\n help='input batch size for training (default: 64)')\n\nparser.add_argument('--delay-allreduce', action='store_true', default=False,\n help='whether use delay allreduce')\n\nparser.add_argument('--datatype', type=int, default=10000000, metavar='N',\n help='input batch size for training (default: 64)')\n\nparser.add_argument('--iteration-number', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\n\nparser.add_argument('--warmup-number', type=int, default=10, metavar='N',\n help='input batch size for training (default: 64)')\n\nparser.add_argument('--debug-apex', action='store_true', default=False,\n help='whether use debug apex')\n\nparser.add_argument('--debug-apex-dir', type=str, default='/home/scratch.shawnw_gpu/docker/apex/apex/parallel', \n help='custom-apex-dir')\n\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n\nparser.add_argument('--log-interval', type=int, default=10, metavar='N',\n 
help='how many batches to wait before logging training status')\n\nparser.add_argument(\"--local_rank\", default=0, type=int)\n\nargs = parser.parse_args()\n\nif args.debug_apex:\n import sys\n sys.path.insert(0, args.debug_apex_dir)\n from distributed import DistributedDataParallel as DDP\nelse:\n from apex.parallel import DistributedDataParallel as DDP\n \nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\nargs.distributed = False\nif 'WORLD_SIZE' in os.environ:\n args.distributed = int(os.environ['WORLD_SIZE']) > 1\n\ndef rank0print(*nargs):\n print_str = \"\"\n for item in nargs:\n print_str += str(item)\n if args.distributed:\n if torch.distributed.get_rank() == 0:\n print(print_str)\n else:\n print(print_str)\n\n\nif args.distributed:\n assert args.cuda, \"Distributed mode requires running with CUDA.\"\n torch.cuda.set_device(args.local_rank)\n torch.distributed.init_process_group(backend='nccl', init_method='env://')\n world_size = torch.distributed.get_world_size()\n if torch.distributed.get_rank() == 0:\n if args.logfile == None:\n logfile = \"logfile_rank0.log\"\n args.logfile = logfile\n logging.basicConfig(format='%(filename)s:%(lineno)d:%(levelname)s:%(message)s', filename=args.logfile, level=logging.DEBUG)\n else:\n logging.basicConfig(format='%(filename)s:%(lineno)d:%(levelname)s:%(message)s', level=logging.INFO)\nelse:\n world_size = 1\n\nrank0print(\"world_size:\", world_size)\n\nclass Net(nn.Module):\n def __init__(self, args):\n super(Net, self).__init__()\n\n self.layers = []\n self.args = args\n\n #input layer\n layer = torch.nn.Linear(args.input_size, args.hidden_size, args.bias)\n self.add_module(\"input_layer\", layer)\n self.layers.append(layer)\n \n #hidden layer\n for i in range(args.hidden_layers):\n layer = torch.nn.Linear(args.hidden_size, args.hidden_size, args.bias)\n self.add_module(\"hidden_layer_\"+str(i), layer)\n self.layers.append(layer)\n\n #output layer\n layer = torch.nn.Linear(args.hidden_size, args.output_size, args.bias)\n self.add_module(\"output_layer\", layer)\n self.layers.append(layer)\n\n def forward(self, x):\n for layer in self.layers:\n x = layer(x)\n return x\n\n def register_hook(self):\n self.grad_accs = []\n for name, param in self.named_parameters():\n if param.requires_grad:\n def wrapper(param, name):\n param_tmp = param.expand_as(param)\n grad_acc = param_tmp.grad_fn.next_functions[0][0]\n rank0print(grad_acc)\n def allreduce_hook(*unused):\n rank0print(\"running param:\",name, \" grad at time:\", time.time())\n grad_acc.register_hook(allreduce_hook)\n self.grad_accs.append(grad_acc)\n wrapper(param, name)\n\nmodel = Net(args)\n\nif args.cuda:\n model = model.cuda()\n\nif args.debug_apex:\n model.register_hook()\n\nif args.distributed:\n if args.layers_per_bucket != 0:\n args.bucket_size = args.layers_per_bucket * args.hidden_size *args.hidden_size \n model = DDP(model,message_size=args.bucket_size,delay_allreduce=args.delay_allreduce)\n\ndef train():\n \n rank0print(\"**************************\")\n rank0print(model)\n rank0print(\"**************************\")\n for name, param in model.named_parameters():\n rank0print(name, \" : \", param.size())\n rank0print(\"**************************\")\n\n model.train()\n\n x = torch.randn(args.batch_size, args.input_size, requires_grad=False) \n target = torch.randn(args.batch_size, args.output_size, requires_grad=False) \n loss_fn = torch.nn.MSELoss()\n\n total_backward_time = 0\n total_e2e_time = 0\n total_loss_time = 0\n total_forward_time = 0\n\n if args.cuda:\n x = 
x.cuda()\n target = target.cuda()\n loss_fn = loss_fn.cuda()\n\n forward_start = torch.cuda.Event(enable_timing=True)\n forward_end = torch.cuda.Event(enable_timing=True)\n nccl_end = torch.cuda.Event(enable_timing=True)\n\n compute_stream = torch.cuda.current_stream()\n\n pipeline_start = True\n\n for i in range(args.iteration_number):\n\n rank0print(\"===ITERATION:%s====\", i)\n\n if (pipeline_start != True):\n grads = [param.grad.data for param in self.module.parameters() if param.grad is not None]\n buckets = split_by_type(grads) \n for tp in buckets:\n bucket = buckets[tp]\n coalesced = flatten(bucket)\n if extra_args is not None:\n call(coalesced, *extra_args)\n else:\n call(coalesced)\n if call is dist.all_reduce:\n coalesced /= dist.get_world_size()\n \n for buf, synced in zip(bucket, unflatten(coalesced, bucket)):\n buf.copy_(synced)\n\n model.zero_grad() \n\n forward_start_time = time.time()\n output = model(x)\n compute_stream.synchronize()\n forward_end_time = time.time()\n\n loss = loss_fn(output, target)\n compute_stream.synchronize()\n loss_end_time = time.time()\n\n loss.backward()\n compute_stream.synchronize()\n\n backward_end_time = time.time()\n\n rank0print(\"step \", i, \n \",e2e:\", backward_end_time-forward_start_time, \n \",forward:\", forward_end_time-forward_start_time, \n \",loss:\",loss_end_time - forward_end_time,\n \",backward:\",backward_end_time - loss_end_time)\n\n if i+1 > args.warmup_number:\n total_e2e_time += backward_end_time - forward_start_time\n total_backward_time += backward_end_time - loss_end_time\n total_loss_time += loss_end_time - forward_end_time\n total_forward_time += forward_end_time-forward_start_time\n \n average_e2e_time = total_e2e_time/(args.iteration_number-args.warmup_number)\n average_forward_time = total_forward_time/(args.iteration_number-args.warmup_number)\n average_loss_time = total_loss_time/(args.iteration_number-args.warmup_number)\n average_backward_time = total_backward_time/(args.iteration_number-args.warmup_number)\n\n rank0print(\"===RUN EPIOLOG===\")\n rank0print(\"Average e2e:\", average_e2e_time,\n \",forward:\", average_forward_time,\n \",loss:\", average_loss_time,\n \",backward:\", average_backward_time)\n\n rank0print(\"csv;{};{};{};{};{};{}\".format(world_size, args.layers_per_bucket, average_e2e_time, average_forward_time, average_loss_time, average_backward_time))\n\ntrain()\n\nlogging.shutdown()\n", "sub_path": "examples/toy/bucket_optimized.py", "file_name": "bucket_optimized.py", "file_ext": "py", "file_size_in_byte": 9044, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 20, "usage_type": "call"}, {"api_name": "sys.path.insert", "line_number": 77, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 82, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 85, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 86, "usage_type": "attribute"}, {"api_name": "torch.distributed.get_rank", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.distributed", "line_number": 93, "usage_type": "attribute"}, {"api_name": "torch.cuda.set_device", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 101, "usage_type": "attribute"}, {"api_name": 
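
bucket_optimized.py above allocates torch.cuda.Event objects but ends up timing with time.time() around stream synchronizations. For reference, a hedged sketch of the event-based timing it presumably aimed for; the matmul workload is a placeholder, not part of the original script.

import torch

if torch.cuda.is_available():
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)

    x = torch.randn(1024, 1024, device="cuda")
    start.record()               # enqueue start marker on the current stream
    y = x @ x                    # the work being measured
    end.record()                 # enqueue end marker
    torch.cuda.synchronize()     # wait until both events have completed
    print("elapsed ms:", start.elapsed_time(end))
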
"torch.distributed.init_process_group", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.distributed", "line_number": 102, "usage_type": "attribute"}, {"api_name": "torch.distributed.get_world_size", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.distributed", "line_number": 103, "usage_type": "attribute"}, {"api_name": "torch.distributed.get_rank", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.distributed", "line_number": 104, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 108, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 108, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 110, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 110, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 116, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 116, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 124, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 130, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 135, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 135, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 153, "usage_type": "call"}, {"api_name": "apex.parallel.DistributedDataParallel", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 182, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 183, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 184, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 184, "usage_type": "attribute"}, {"api_name": "torch.cuda.Event", "line_number": 196, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 196, "usage_type": "attribute"}, {"api_name": "torch.cuda.Event", "line_number": 197, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 197, "usage_type": "attribute"}, {"api_name": "torch.cuda.Event", "line_number": 198, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 198, "usage_type": "attribute"}, {"api_name": "torch.cuda.current_stream", "line_number": 200, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 200, "usage_type": "attribute"}, {"api_name": "torch.distributed.all_reduce", "line_number": 218, "usage_type": "attribute"}, {"api_name": "torch.distributed", "line_number": 218, "usage_type": "name"}, {"api_name": "torch.distributed.get_world_size", "line_number": 219, "usage_type": "call"}, {"api_name": "torch.distributed", "line_number": 219, "usage_type": "name"}, {"api_name": "time.time", "line_number": 226, "usage_type": "call"}, {"api_name": "time.time", "line_number": 229, "usage_type": "call"}, {"api_name": "time.time", "line_number": 233, "usage_type": "call"}, {"api_name": "time.time", "line_number": 238, "usage_type": "call"}, {"api_name": "logging.shutdown", "line_number": 267, "usage_type": "call"}]} +{"seq_id": "421760806", "text": "import os\nimport csv\nimport sys\nimport logging\nimport pymongo\n\nfrom config.settings import config\nfrom etl.transform import coauthorGraph, coauthorMatrix\nfrom etl.transform import collaboratorGraph, collaboratorOrgs\n\nlogging.basicConfig(\n filename=os.path.join(config['LOG_DIR'],'etl.log'),\n format='%(asctime)-15s %(message)s',\n 
level=logging.INFO)\n\nrab_jobs = [ coauthorGraph, coauthorMatrix,\n collaboratorGraph, collaboratorOrgs ]\n\ndef load_csv(fileName):\n with open(fileName, 'r' ) as f:\n rdr = csv.reader(f)\n data = [ row for row in rdr ]\n return data\n\ndef main():\n extractDir = config['EXTRACT_DIR']\n mongo = pymongo.MongoClient(config['MONGO_URI'], config['MONGO_PORT'])\n mongo_db = mongo.get_database(config['MONGO_DB'])\n auth = mongo_db.authenticate(config['MONGO_USER'], config['MONGO_PASSWORD'])\n\n for job in rab_jobs:\n logging.info(\"Begin: \" + job.__name__)\n viz_coll = mongo_db[ job.collection_name ]\n coll_key = job.key_field\n coll_val = job.value_field\n\n datasets = []\n for input_file in job.input_files:\n data = load_csv(os.path.join(extractDir, input_file) )\n datasets.append(data)\n\n data_generator = job.transform(*datasets)\n for key, timestamp, trans_data in data_generator:\n viz_coll.update_one({ coll_key: key },\n {'$set' : { 'updated': timestamp, coll_key: key,\n coll_val: trans_data } }, upsert=True)\n logging.info(\"Completed: \" + job.__name__)\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "rabviz/etl/run_transform.py", "file_name": "run_transform.py", "file_ext": "py", "file_size_in_byte": 1581, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "logging.basicConfig", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "config.settings.config", "line_number": 12, "usage_type": "name"}, {"api_name": "logging.INFO", "line_number": 14, "usage_type": "attribute"}, {"api_name": "etl.transform.coauthorGraph", "line_number": 16, "usage_type": "name"}, {"api_name": "etl.transform.coauthorMatrix", "line_number": 16, "usage_type": "name"}, {"api_name": "etl.transform.collaboratorGraph", "line_number": 17, "usage_type": "name"}, {"api_name": "etl.transform.collaboratorOrgs", "line_number": 17, "usage_type": "name"}, {"api_name": "csv.reader", "line_number": 21, "usage_type": "call"}, {"api_name": "config.settings.config", "line_number": 26, "usage_type": "name"}, {"api_name": "pymongo.MongoClient", "line_number": 27, "usage_type": "call"}, {"api_name": "config.settings.config", "line_number": 27, "usage_type": "name"}, {"api_name": "config.settings.config", "line_number": 28, "usage_type": "name"}, {"api_name": "config.settings.config", "line_number": 29, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "165678653", "text": "import numpy as np\nimport pandas as pd\nimport hashlib\n\nfrom tqdm import tqdm\n\n\n# verbose function\ndef tqdm_v(gen, verbose=True, **kwargs):\n if verbose:\n return tqdm(gen, **kwargs)\n else:\n return gen\n\n\nclass Preprocessing:\n def __init__(self):\n pass\n\n def get_name(self):\n return \"\"\n\n def apply(self, data):\n return data\n\n\nclass RandomSlice(Preprocessing):\n \"\"\"\n Preprocessing object that extracts a random slice from the data.\n \"\"\"\n def __init__(self, slice_size = -1, slice_start = None):\n self.slice_param = slice_size\n self.start_param = slice_start\n\n def get_name(self):\n \"\"\"Returns the name of the object.\n\n Returns\n -------\n 
str\n            Name of the instantiated RandomSlice object,\n            based on its parameters.\n\n        \"\"\"\n        return \"_rand-slice-{}{}\".format(self.slice_param,\n                                         (\"-\"+str(self.start_param)) if self.start_param is not None else \"\")\n\n    def apply(self, data, verbose=False):\n        \"\"\"Applies slicing to given data.\n\n        Parameters\n        ----------\n        data : numpy.ndarray\n            The data to apply the slicing on.\n        verbose : bool\n            Whether to print progress messages.\n\n        Returns\n        -------\n        numpy.ndarray\n            The data, sliced accordingly.\n\n        \"\"\"\n        n = data.shape[1]\n        if verbose:\n            print(\"retrieving window\")\n        # slice_size\n        if self.slice_param == -1 or self.slice_param > n:\n            slice_size = n\n        elif type(self.slice_param) == float:\n            slice_size = int(self.slice_param * n)\n        else:\n            slice_size = self.slice_param\n        # slice_start\n        if self.start_param is None:\n            slice_start = np.random.randint(0,n - slice_size + 1)\n        else:\n            slice_start = self.start_param\n        # slicing\n        return data[:, slice_start:(slice_start + slice_size)]\n\n\nclass HashingWindow(Preprocessing):\n    def __init__(self, granularity=1, hashing=None):\n        \"\"\"Initializes HashingWindow object.\n\n        Parameters\n        ----------\n        granularity : int\n            Size of window to consider as atomic element of a sequence.\n            Default, 1, means that we take the data as is.\n            10 means that we slice the data into slices of width 10, and work on hashes of those length-10 slices.\n        hashing : type\n            Hashing method. Must be a hashlib method.\n\n        \"\"\"\n        self.granularity = granularity\n        self.hasher = hashing if hashing is not None else hashlib.md5\n\n    def get_name(self):\n        \"\"\"Returns the name of the HashingWindow object.\n\n        Returns\n        -------\n        str\n            Name of the object, taking into account granularity and hashing method.\n\n        \"\"\"\n        return \"hash-{}-{}\".format(self.hasher().name, self.granularity)\n\n    def apply(self, data, verbose=False):\n        # slicing data into evenly sized chunks\n        def chunk_sizes():\n            l = list()\n            curr = 0\n            for i in range(0, data.shape[1], self.granularity):\n                curr += min(self.granularity, data.shape[1]-i)\n                l.append(curr)\n            return np.array(l[:-1])\n\n        chunks = chunk_sizes()\n        result = np.zeros((data.shape[0], len(chunks)+1))\n        for j,subarray in tqdm_v(enumerate(np.split(data, chunks, axis=1)),\n                                 verbose, desc=\"hashing\"):\n            for i in range(subarray.shape[0]):\n                result[i,j] = int(self.hasher((''.join(map(str,subarray[i,:]))).encode('UTF-8')).hexdigest(),16)\n        return result\n", "sub_path": "Code/common/preprocessing.py", "file_name": "preprocessing.py", "file_ext": "py", "file_size_in_byte": 3670, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "tqdm.tqdm", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 76, "usage_type": "attribute"}, {"api_name": "hashlib.md5", "line_number": 98, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.split", "line_number": 123, "usage_type": "call"}]}
+{"seq_id": "400376053", "text": "import socket\nimport sys\nimport cv2\nimport pickle\nimport numpy as np\nimport struct ## new\nimport zlib\nimport tensorflow as tf\nfrom align_custom import AlignCustom\nfrom face_feature import FaceFeature\nfrom mtcnn_detect import MTCNNDetect\nfrom tf_graph import FaceRecGraph\nimport argparse\nimport sys\nimport json\nimport time\nfrom 
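
A small usage sketch for the preprocessing classes above, assuming RandomSlice and HashingWindow are importable from that module; the array contents are arbitrary demo values.

import numpy as np
# assumes RandomSlice and HashingWindow from preprocessing.py are in scope
data = np.arange(40).reshape(4, 10)

slicer = RandomSlice(slice_size=4, slice_start=2)
window = slicer.apply(data)        # columns 2..5 of every row
print(window.shape)                # (4, 4)

hasher = HashingWindow(granularity=5)
hashed = hasher.apply(data)        # one md5-derived int per 5-wide chunk
print(hashed.shape)                # (4, 2)
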
threading import *\nimport multiprocessing\nfrom multiprocessing import Pipe\nimport mss\nimport time\nfrom FaceRecog import *\nglobal server_socket\nglobal FrameCounter\nglobal frameList\nglobal devices\nglobal noc\n\n\ndef add(a,b):\n\tc=a+b\n\tprint(c)\n\treturn c\n\ndef createSocket():\n\tglobal server_socket\n\tglobal FrameCounter\n\tglobal aligner\n\tglobal extract_feature\n\tglobal face_detect\n\tglobal devices\n\tglobal noc\n\tdevices=[]\n\tFrameCounter,aligner,extract_feature,face_detect=startRec()\n\tFrameCounter=0\n\tnoc=1000\n\tserver_socket=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\t#server_socket.settimeout(0.1)\n\tprint('Socket created')\n\n\tserver_socket.bind(('',9000))\n\tprint('Socket bind complete')\n\tserver_socket.listen(noc)\n\tprint('Socket now listening')\n\treturn server_socket\n\t\ndef start(socket):\n\t\trpf=Thread(target=recPiFrame(socket))\n\t\trpf.start()\ndef connectDevice(server_socket):\n\tglobal devices\n\tconCount=0\n\ttry:\n\t\t\n\t\tprint(\"accepting\")\n\t\tconn,add=server_socket.accept()\n\t\ttime.sleep(0.1)\n\t\tprint(\"new device connected | address : \",add[0],\" \",add[1])\n\t\tif conn is not None:\n\t\t\tdevices.append(conn)\n\t\t\t\n\t\t\ttime.sleep(0.5)\n\texcept Exception as e:\n\t\tprint(e,\"error\")\n\t\ttime.sleep(0.1)\n\ttime.sleep(0.5)\n\ndef recPiFrame(server_socket):\n\tglobal frameList\n\tglobal aligner\n\tglobal extract_feature\n\tglobal face_detect\n\tglobal FrameCounter\n\tglobal devices\n\tframeList=[]\t\n\td=0\n\tconnectDevice(server_socket)\n\t\n\tconn1=devices[0]\n\tprint(\"|--- Conection has been established with pi ---|\")\n\tp_output, p_input = Pipe()\n\tsvf=Thread(target=sendVideoFrame(server_socket))\n\tsvf.start()\n\ttime.sleep(0.01)\n\tdata = b\"\"\n\tpayload_size = struct.calcsize(\">L\")\n\ttime.sleep(1)\n\tprint(\"payload_size: {}\".format(payload_size))\n\twhile True:\n\t\tprint(\"recv:\")\n\t\ttime.sleep(0.01)\n\t\twhile len(data) < payload_size:\n\t\t\tdata += conn1.recv(4096)\n\t\tpacked_msg_size = data[:payload_size]\n\t\tdata = data[payload_size:]\n\t\tmsg_size = struct.unpack(\">L\", packed_msg_size)[0]\n\t\twhile len(data) < msg_size:\n\t\t\tdata += conn1.recv(4096)\n\t\tframe_data = data[:msg_size]\n\t\tdata = data[msg_size:]\n\t\tframe=pickle.loads(frame_data, fix_imports=True, encoding=\"bytes\")\n\t\tframe = cv2.imdecode(frame, cv2.IMREAD_COLOR)\n\t\tframeList,FrameCounter,frame=camera_recog(frame,frameList,FrameCounter,aligner,extract_feature,face_detect)\n\t\ttime.sleep(0.01)\n\t\t\n\t\tcv2.waitKey(1)\n\n\ndef sendVideoFrame(server_socket):\n\tglobal FrameCounter\n\tglobal frameList\n\tglobal devices\n\tprint(\"waiting for devices to connect\")\n\ttime.sleep(1.0)\n\timg_counter = 0\n\tconnectDevice(server_socket)\n\tencode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]\n\tprint('sending')\n\tcmdRecThread=Thread(target=RecCommand)\n\tcmdRecThread.start()\n\tcmdRecThread.join()\n\tfail=0\n\n\twhile True:\n\t\ttry:\n\t\t\tcountDevices=1\n\t\t\twhile (countDevices 3:\n\t\t\t\t\tframe =frameList[FrameCounter-2]\n\t\t\t\t\tcv2.imshow('fcontroller'+str(len(frame)),frame)\n\t\t\t\t\tresult, frame = cv2.imencode('.jpg', frame, encode_param)\n\t\t\t\t\t#data = zlib.compress(pickle.dumps(frame, 0))\n\t\t\t\t\tdata = pickle.dumps(frame, 0)\n\t\t\t\t\tsize = len(data)\n\n\t\t\t\t\t#print(\"frames:\",\"{}: {}\".format(img_counter, 
size))\n\t\t\t\t\t\n\t\t\t\t\tconn2=devices[countDevices]\n\t\t\t\t\tds=conn2.send(data)\n\t\t\t\t\tif(ds==0):\n\t\t\t\t\t\tconnectDevice(server_socket)\n\t\t\t\t\t\n\t\t\t\t\ttime.sleep(0.05)\n\t\t\t\t\tcountDevices+=1\n\t\t\t\t\t\n\t\t\t\t\timg_counter += 1\n\t\t\t\t\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\t\t\t\t\tbreak\t\n\t\texcept Exception as e:\n\t\t\tpass\n\t\t\n\t\t\ndef RecCommand():\n\ttime.sleep(1)\n\twhile True:\t\n\t\tglobal devices\n\t\tcd=1\n\t\twhile cd \" -> \"ray.ObjectRef\":\n    log.info(\"[wrapper] Got executor.\")\n    executor = get_or_create_kv_store(\n        identifier=RayBackend.store_identifier, allow_new=True\n    )\n    log.info(f\"[wrapper] Launching task (with {args}, {kwargs}.\")\n    ret_str = executor.execute(f, args=args, kwargs=kwargs, eager=eager)\n    log.info(\"[wrapper] Remote task finished\")\n    return ret_str\n\n    return wrapper\n\n\ndef ray_task(\n    python_callable: Optional[Callable] = None,\n    ray_conn_id: str = \"ray_default\",\n    ray_worker_pool: str = \"ray_worker_pool\",\n    eager: bool = False,\n):\n    \"\"\"Wraps a function to be executed on the Ray cluster.\n\n    The return values of the function will be cached on the Ray object store.\n    Downstream tasks must be ray tasks too, as the dependencies will be\n    fetched from the object store. The RayBackend will need to be set up in your\n    Dockerfile to use this decorator.\n\n    Use as a task decorator: ::\n\n        from ray_provider.decorators import ray_task\n\n        def ray_example_dag():\n\n            @ray_task(\"ray_conn_id\")\n            def sum_cols(df: pd.DataFrame) -> pd.DataFrame:\n                return pd.DataFrame(df.sum()).T\n\n    :param python_callable: Function to be invoked on the Ray cluster.\n    :type python_callable: Optional[Callable]\n    :param ray_conn_id: Airflow connection id used to connect to the Ray cluster.\n    :type ray_conn_id: str\n    :param ray_worker_pool: The pool that controls the\n        number of parallel clients created to access the Ray cluster.\n    :type ray_worker_pool: Optional[str]\n    :param eager: Whether to run the function on the\n        coordinator process (on the Ray cluster) or to\n        send the function to a remote task. 
You should\n set this to False normally.\n :type eager: Optional[bool]\n \"\"\"\n\n @functools.wraps(python_callable)\n def wrapper(f):\n\n return task(\n ray_wrapped(f, ray_conn_id, eager=eager),\n pool=ray_worker_pool,\n )\n\n return wrapper\n", "sub_path": "ray_provider/decorators/ray_decorators.py", "file_name": "ray_decorators.py", "file_ext": "py", "file_size_in_byte": 2441, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "ray_provider.xcom.ray_backend.get_or_create_kv_store", "line_number": 16, "usage_type": "call"}, {"api_name": "ray_provider.xcom.ray_backend.RayBackend.store_identifier", "line_number": 17, "usage_type": "attribute"}, {"api_name": "ray_provider.xcom.ray_backend.RayBackend", "line_number": 17, "usage_type": "name"}, {"api_name": "functools.wraps", "line_number": 13, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 28, "usage_type": "name"}, {"api_name": "airflow.operators.python.task", "line_number": 67, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "343368833", "text": "import copy\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport json\nimport os.path\nimport sys\nsys.path.append('../')\nfrom types import SimpleNamespace as Namespace\nfrom feature.SimpleFeatureExtractor import SimpleFeatureExtractor\nfrom util.Util import Util\n\nclassificationNum = 3\nrootDir = '../../'\npath = rootDir + Util.getConfig('trials_folder_path')\ntmpPath = rootDir + Util.getConfig('tmp_path')\ndataFileNames = ['0a.json','0b.json','0c.json']\nlabels = [0, 1, 1] #['normal', 'hole', 'scallop']\nextractor = SimpleFeatureExtractor()\ndfAll = None\nfor index, dataFileName in enumerate(dataFileNames):\n df = extractor.getSimpleFeaturedData(path + dataFileName, labels[index])\n # print(len(df))\n print(path + dataFileName, labels[index],len(df))\n if dfAll is None:\n dfAll = df\n continue\n else:\n dfAll = dfAll.append(df)\n\nprint(len(dfAll))\n\ndfAll = dfAll[['x', 'y', 'z', 'label']]\ndata = dfAll.as_matrix()\nprint(data)\n\nprint('****************Start to run classifications***************')\nrand_data = copy.deepcopy(data)\nnp.random.shuffle(rand_data)\n# extract a stack of 28x28 bitmaps\nX_rand = rand_data[:,:len(data[0])-1]\ny_rand = rand_data[:,len(data[0])-1]\n\n# X_rand = digits[:, 0:784]\n# y_rand = digits[:, 784:785]\nheldout_len = int(len(X_rand)*0.8)\nx_train = X_rand[:heldout_len]\ny_train = y_rand[:heldout_len]\nx_test = X_rand[heldout_len:]\ny_test = y_rand[heldout_len:]\n# X = data[:,:3]\n# y = data[:,4]\n\nfor numTree in range(1,11):\n if(numTree %2 == 0):\n continue\n \"\"\"Random Forest\"\"\"\n from sklearn.ensemble import RandomForestClassifier\n rf_model = RandomForestClassifier(n_estimators=numTree)\n model = rf_model\n print('Random Forest(',numTree,'):')\n\n # \"\"\"Artificial Neural Network\"\"\"\n # from sklearn.neural_network import MLPClassifier\n # ann_model = MLPClassifier()\n # model = ann_model\n # print('ANN:')\n #\n # \"\"\"SVM\"\"\"\n # from sklearn.svm import SVC\n # svm_model = SVC()\n # model = svm_model\n # print('SVM:')\n\n model.fit(x_train,y_train)\n print('Training score: ',model.score(x_train,y_train))\n print('Testing score: ', model.score(x_test,y_test))\n\n from sklearn.metrics import classification_report\n y_true = 
y_test\n y_pred = model.predict(x_test)\n target_names = ['0', '1']\n print(classification_report(y_true, y_pred, target_names=target_names))\n\n\n # from sklearn.model_selection import cross_val_score\n # cross_val_score = cross_val_score(model, x_train, y_train, cv=10)\n # print(cross_val_score)\n", "sub_path": "script/process/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2556, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "sys.path.append", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "util.Util.Util.getConfig", "line_number": 15, "usage_type": "call"}, {"api_name": "util.Util.Util", "line_number": 15, "usage_type": "name"}, {"api_name": "util.Util.Util.getConfig", "line_number": 16, "usage_type": "call"}, {"api_name": "util.Util.Util", "line_number": 16, "usage_type": "name"}, {"api_name": "feature.SimpleFeatureExtractor.SimpleFeatureExtractor", "line_number": 19, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 39, "usage_type": "attribute"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 59, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "213313372", "text": "#naming game\n\nimport sys\nimport random as r\nimport csv \nfrom itertools import combinations\n#from statistics import mean\n#from timeit import default_timer as timer\n\nfrom fly import Firefly\n\nNUM_SPECIES = 3\nNUM_EACH = 15\nEPOCHS = 500\nMUTATE_PROB = .1\nTRIALS = 10\n\n#given list of fireflies, the epoch number\n#implement original naming game \ndef round_one(fireflies, epoch, A, B):\n for (i, j) in combinations(fireflies, 2):\n same = i.same_species(j)\n if same:\n #both no pattern\n if i.pattern == None and j.pattern == None:\n i.init_pattern()\n j.pattern = i.pattern\n #j has pattern\n elif i.pattern == None:\n i.pattern = j.pattern\n #i has pattern\n elif j.pattern == None:\n j.pattern = i.pattern\n #both have\n elif i.score() != None and j.score() != None:\n #compare lincombo, replicate smaller one\n iscore = (A*i.score()) + (B*i.num_flash())\n jscore = (A*j.score()) + (B*j.num_flash())\n if iscore <= jscore:\n j.pattern = i.pattern[:]\n if r.random() < MUTATE_PROB and epoch < 495:\n j.mutate()\n j.reset_simscore()\n j.last_score = iscore\n else:\n i.pattern = j.pattern[:]\n if r.random() < MUTATE_PROB and epoch < 495:\n i.mutate()\n i.reset_simscore()\n i.last_score = jscore\n\n else:\n #calculate and update similarity score\n if i.pattern == None:\n i.init_pattern()\n if j.pattern == None:\n j.init_pattern()\n distance = i.calc_similarity(j)\n i.update_simscore(distance)\n j.update_simscore(distance)\n\n#printing results\ndef list_flies(flies):\n flies.sort()\n seen = {}\n for f in flies:\n if (str(f.set_start()), f.species) not in seen:\n seen[(str(f.set_start()), f.species)] = f.last_score\n elif f.last_score < seen[(str(f.set_start()), f.species)]:\n seen[(str(f.set_start()), f.species)] = f.last_score\n \n return seen\n\n#write to csv\ndef print_csv(results):\n with open('results.csv', mode = 'w') as file:\n writer = csv.writer(file, delimiter = ',')\n \n for run in results.keys():\n row = [run]\n flies = results[run]\n row += flies\n row += 
flies.values()\n writer.writerow(row)\n\n\ndef main(args):\n #keep track of all the results\n runs = {}\n\n a = [.2, .25, .3, .35, .4, .45, .5]\n \n for A in a:\n B = 1-A\n for rep in range(TRIALS):\n fireflies = [0] * (NUM_SPECIES * NUM_EACH)\n for i in range(NUM_SPECIES):\n for j in range(NUM_EACH):\n fireflies[j+(NUM_EACH*i)] = Firefly(i)\n\n for epoch in range(EPOCHS):\n r.shuffle(fireflies)\n round_one(fireflies, epoch, A, B)\n \n runs[(A, B, rep)] = list_flies(fireflies)\n\n print_csv(runs)\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))\n", "sub_path": "simplefly/learn_length/simulation.py", "file_name": "simulation.py", "file_ext": "py", "file_size_in_byte": 3279, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "itertools.combinations", "line_number": 21, "usage_type": "call"}, {"api_name": "random.random", "line_number": 41, "usage_type": "call"}, {"api_name": "random.random", "line_number": 47, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 77, "usage_type": "call"}, {"api_name": "fly.Firefly", "line_number": 99, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 102, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 110, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 110, "usage_type": "attribute"}]} +{"seq_id": "9116481", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport os\nimport sys\nimport pickle\nimport torch.nn.utils\nimport torch.optim as optim\nimport torch.utils.data as data\nfrom tqdm.auto import tqdm\nfrom sklearn.metrics import r2_score, precision_score, recall_score, f1_score, confusion_matrix, accuracy_score\nimport itertools as it\nimport time\nimport numpy as np\nfrom seqeval.metrics import classification_report\n\nfrom tqdm.auto import tqdm\n\ndef load_emb(path, total=None):\n toks = []\n embs = []\n with open(path, 'r') as f:\n for l in tqdm(f, path, total=total):\n tok, *emb = l.strip().split()\n emb = [float(x) for x in emb]\n toks.append(tok)\n embs.append(emb)\n assert('PAD_TOK' not in toks and 'UNK_TOK' not in toks)\n toks += ['PAD_TOK', 'UNK_TOK']\n embs += [[0.]*len(emb), [0.]*len(emb)]\n tok_to_id = dict(zip(toks, it.count()))\n emb = torch.tensor(embs)\n return tok_to_id, emb\n\n# load characters from (training) data\ndef load_chrs(path, total=None):\n chars = set()\n with open(path, 'r') as f:\n for l in tqdm(f, path, total=total):\n try:\n for c in l.strip().split()[2]:\n chars.add(c)\n except:\n pass\n assert('PAD_CHR' not in chars and 'UNK_CHR' not in chars)\n chars = sorted(chars)\n chars.append('PAD_CHR')\n chars.append('UNK_CHR')\n return dict(zip(chars, it.count())) \n\ndef load_classes(path, total=None):\n id_to_lbl = set()\n with open(path, 'r') as f:\n for l in tqdm(f, path, total=total):\n try:\n id_to_lbl.add(l.strip().split()[3])\n except:\n pass\n assert('PAD_LBL' not in id_to_lbl)\n id_to_lbl = sorted(id_to_lbl)\n id_to_lbl.append('PAD_LBL')\n lbl_to_id = {k:v for v, k in enumerate(id_to_lbl)}\n return lbl_to_id, id_to_lbl\n \ndef load_data(path, tok_to_id, lbl_to_id, chr_to_id):\n with open(path, 'r') as f:\n seqs = f.read().split('\\n\\n')\n if not seqs[-1].strip():\n seqs.pop()\n if seqs[0][0] == '\\n':\n seqs[0] = seqs[0][1:]\n seqs = [l.split('\\n') for l in seqs]\n seq_len = max((len(seq) for seq in seqs))\n seqs = [[l.split(' ') for l in seq] for seq in seqs]\n wrd_len = 
max((max((len(cols[2]) for cols in seq)) for seq in seqs))\n    W = torch.empty((len(seqs), seq_len, wrd_len), dtype=torch.long).fill_(chr_to_id['PAD_CHR'])\n    X = torch.empty((len(seqs), seq_len), dtype=torch.long).fill_(tok_to_id['PAD_TOK'])\n    Y = torch.empty((len(seqs), seq_len), dtype=torch.long).fill_(lbl_to_id['PAD_LBL'])\n    for i, seq in enumerate(tqdm(seqs, 'sequences')):\n        for j, cols in enumerate(seq):\n            assert(j < seq_len)\n            tok, _, wrd, lbl = cols\n            for k, ch in enumerate(wrd):\n                try:\n                    W[i,j,k] = chr_to_id[ch]\n                except KeyError:\n                    W[i,j,k] = chr_to_id['UNK_CHR']\n            try:\n                X[i,j] = tok_to_id[tok]\n            except KeyError:\n                X[i,j] = tok_to_id['UNK_TOK'] \n            Y[i,j] = lbl_to_id[lbl]\n    return W, X, Y\n\nclass NERDataset(data.Dataset):\n    def __init__(self, W, X, Y):\n        self.W, self.X, self.Y = W, X, Y\n    \n    def __len__(self):\n        return self.Y.shape[0]\n    \n    def __getitem__(self, i):\n        return self.W[i], self.X[i], self.Y[i]\n\nclass LinearCRF(nn.Module):\n    def __init__(self, input_size, hidden_size, lbl_to_id, lstm_model, Y=None, freeze=False):\n        super().__init__()\n        \n        #self.lstm = nn.LSTM(input_size, hidden_size, batch_first=True, bidirectional=False)\n        self.lstm = lstm_model\n        self.lbl_to_id = lbl_to_id\n        self.lbl_to_id['START_LBL'] = 18\n        self.num_tags = 19 #Includes the 17 tags + start(START_LBL) + end(PAD_LBL)\n        #self.T.data[0:17,0:17] = torch.from_numpy(get_matrix().transpose())\n\n        # Rishabh: initialization\n        if Y is None:\n            self.T = nn.Parameter(torch.randn(self.num_tags, self.num_tags))\n        else:\n            print('initializing self.T')\n            Y = torch.cat((\n                torch.empty(Y.shape[0], 1, dtype=torch.long).fill_(self.lbl_to_id['START_LBL']),\n                Y.cpu(),\n                torch.empty(Y.shape[0], 1, dtype=torch.long).fill_(self.lbl_to_id['PAD_LBL'])\n            ), dim=-1)\n            Y = Y.cuda()\n            A = torch.empty(self.num_tags, self.num_tags)\n            for i in range(self.num_tags):\n                for j in range(self.num_tags):\n                    A[i,j] = 1+torch.sum((Y[:,:-1] == i) & (Y[:,1:] == j)).item()\n            #for i in tqdm(range(Y.shape[0]), 'sequences'):\n            #    for j in range(Y.shape[1]-1):\n            #        A[Y[i,j], Y[i,j+1]] += 1\n            A /= torch.sum(A, dim=-1)[:,None]\n            A = torch.log(A)\n            self.T = nn.Parameter(A.transpose(0,1))\n            if freeze:\n                print('frozen transition matrix')\n                self.T.requires_grad=False\n        \n        self.dropout = nn.Dropout(0.5)\n        self.proj = nn.Sequential( nn.Linear(2 * input_size, input_size), \n                                   nn.ReLU(),\n                                   nn.Linear(input_size, 19))\n        \n        \n    def forward(self, W, X):\n        #X is of the shape (batch_size, seq_length, num_features)\n        \n        o = self.lstm(W, X)\n        \n        #return F.softmax(self.proj(o), dim=2)\n        return o\n\n    def predict(self, P, mask):\n        \n        with torch.no_grad():\n            batch_size = P.shape[0]\n            path = []\n            for i in range(batch_size):\n                path.append([])\n            \n            choice = torch.zeros(P.shape[0], P.shape[1], 19).cuda()\n            \n            prob_matrix = self.T\n            # X.shape is (batch_size, sentence_length)\n            \n            \n            DP = torch.full((P.shape[0], 19), -1000).cuda()\n            DP[:, self.lbl_to_id['START_LBL']] = 0 #start tag\n            for i in range(batch_size):\n                for j in range(19):\n                    choice[i,0,j] = j\n            #path[:,0] = DP[:,0,:].argmax(dim=1)\n            \n            for i in range(0, P.shape[1]):\n                #next_DP = \n                submask = mask[:, i].unsqueeze(1).float() # [B, 1]\n                emission_score = P[:, i] # [B, C]\n\n                # [B, 1, C] + [C, C]\n                next_choice = DP.unsqueeze(1) + self.T # [B, C, C]\n                next_choice, choice[:, i, :] = next_choice.max(dim=-1)\n                next_choice += emission_score\n                DP = next_choice * submask + DP * (1 - submask) # max_score or acc_score_t\n            \n            DP += self.T[self.lbl_to_id['PAD_LBL']]\n            last_elem = DP.argmax(-1)\n            # now, the 
solution can\n # be computed in the reverse direction. \n # DP[i][j][k] indicates the choice made at the kth step in the jth token \n # of the ith sentences\n \n choice = choice.cpu()\n \n for i in range(batch_size):\n \n num_tags = mask[i].sum()\n path[i].append(last_elem[i].int().item())\n prev = last_elem[i].int().item()\n for j in range(int(num_tags) - 2, -1, -1):\n \n path[i].append(choice[i][j + 1][prev].int().item())\n prev = choice[i][j + 1][prev].int().item()\n \n \n for i in range(batch_size):\n path[i].reverse()\n \n return path\n \ndef log_sum_exp(x):\n \"\"\"calculate log(sum(exp(x))) = max(x) + log(sum(exp(x - max(x))))\n \"\"\"\n max_score = x.max(-1)[0]\n return max_score + (x - max_score.unsqueeze(-1)).exp().sum(-1).log()\n \ndef train(train_set, dev_set, ner_model, id_to_lbl, lbl_to_id, pad_lbl_id, output_file, freeze=False):\n \n id_to_lbl[len(id_to_lbl) - 1] = 'START_LBL'\n \n trainset = NERDataset(train_set[0], train_set[1], train_set[2])\n devset = NERDataset(dev_set[0], dev_set[1], dev_set[2])\n \n trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=4)\n devloader = torch.utils.data.DataLoader(devset, batch_size=128, shuffle=False, num_workers=4)\n \n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n \n net = LinearCRF(100, 100, lbl_to_id, ner_model, train_set[2], freeze=freeze).to(device)\n \n print(net)\n \n tic = time.time()\n optimizer = optim.Adam(net.parameters(), lr=1e-3)\n scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, 100)\n \n patience = 15\n steps_left = patience\n min_val_loss = float('inf')\n best_model_dict = None\n early_stop = False\n\n for epoch in range(1, 101): # loop over the dataset multiple times\n #if epoch % 10 == 0 or epoch == 1:\n net.train()\n \n running_loss = 0.0\n for i, mbatch in enumerate(trainloader):\n W, X, Y = mbatch\n W, X, Y = W.to(device), X.to(device), Y.to(device)\n\n labels = torch.nn.functional.one_hot(Y, num_classes=19).float()\n\n mask = (~(Y == lbl_to_id['PAD_LBL'])).float() # mask is of shape batch_size (batch_size * seq_len)\n\n optimizer.zero_grad()\n P = net(W, X) #shape = (batch_size, sentence_length, num_tags). 
This is the P matrix.\n\n\n score_curr = ((P * labels).sum(dim=2) * mask).sum(dim=1) #Sanity checked to be correct.\n prob_matrix = net.T\n\n prob_sum = net.T[Y[:,0:1], lbl_to_id['START_LBL']].squeeze() + (net.T[Y[:,1:], Y[:,:-1]] * mask[:,:-1]).sum(dim=1)\n\n score = score_curr + prob_sum\n #print(\"score:\", score)\n\n\n DP = torch.full((X.shape[0], 19), -1000).cuda() #DP.shape is batch_size * num_tags\n DP[:, lbl_to_id['START_LBL']] = 0.\n for j in range(X.shape[1]):\n\n sub_mask = mask[:,j].unsqueeze(1)\n DP = (sub_mask) * (log_sum_exp(DP.unsqueeze(1) + prob_matrix.unsqueeze(0) + P[:,j].unsqueeze(2))) + (1 - sub_mask) * DP\n\n partition = (DP + net.T[lbl_to_id['PAD_LBL']]).logsumexp(dim=1)\n #print(\"partition:\", partition)\n #break\n loss = (partition - score).sum() / X.shape[0] #sum over all minibatches\n loss.backward()\n\n\n nn.utils.clip_grad_value_(net.parameters(), 5)\n optimizer.step()\n running_loss += loss.item()\n\n if i % 10 == 9:\n print('[%d, %5d] loss: %f' % (epoch, i + 1, running_loss / 10))\n running_loss = 0.0\n #scheduler.step()\n \n net.eval()\n \n total_loss = 0.0\n for i, mbatch in enumerate(devloader):\n W, X, Y = mbatch\n W, X, Y = W.to(device), X.to(device), Y.to(device)\n \n labels = torch.nn.functional.one_hot(Y, num_classes=19).float()\n\n mask = (1 - (Y == lbl_to_id['PAD_LBL'])).float() # mask is of shape batch_size (batch_size * seq_len)\n\n P = net(W, X) #shape = (batch_size, sentence_length, num_tags). This is the P matrix.\n\n\n score_curr = ((P * labels).sum(dim=2) * mask).sum(dim=1) #Sanity checked to be correct.\n prob_matrix = net.T\n\n prob_sum = net.T[Y[:,0:1], lbl_to_id['START_LBL']].squeeze() + (net.T[Y[:,1:], Y[:,:-1]] * mask[:,:-1]).sum(dim=1)\n\n score = score_curr + prob_sum\n\n\n DP = torch.full((X.shape[0], 19), -1000).cuda() #DP.shape is batch_size * num_tags\n DP[:, lbl_to_id['START_LBL']] = 0.\n for j in range(X.shape[1]):\n\n sub_mask = mask[:,j].unsqueeze(1)\n DP = (sub_mask) * (log_sum_exp(DP.unsqueeze(1) + prob_matrix.unsqueeze(0) + P[:,j].unsqueeze(2))) + (1 - sub_mask) * DP\n\n partition = (DP + net.T[lbl_to_id['PAD_LBL']]).logsumexp(dim=1)\n #print(\"partition:\", partition)\n #break\n loss = (partition - score).sum() #sum over all minibatches\n total_loss += loss.item()\n \n \n total_loss = total_loss / len(devset)\n \n print(\"val loss:\", total_loss)\n print(\"best loss:\", min_val_loss)\n print(\"Patience:\", steps_left)\n print(\"time:\", time.time() - tic)\n \n if total_loss < min_val_loss:\n steps_left = patience\n min_val_loss = total_loss\n best_model_dict = net.state_dict()\n else:\n if steps_left == 1:\n early_stop = True\n break\n else:\n steps_left -= 1\n \n print(\"Early stop:\", early_stop)\n if early_stop:\n print(\"Replacing with better model\")\n net.load_state_dict(best_model_dict)\n \n torch.save(net, output_file )\n \n \ndef predict(saved_model_file, test_set, ner_model, id_to_lbl, lbl_to_id, tok_to_id, pad_lbl_id, output_file):\n \n Y = (1 - (test_set[1] == tok_to_id['PAD_TOK'])).float()\n testset = NERDataset(test_set[0], test_set[1], Y)\n testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=4)\n \n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n net = torch.load(saved_model_file).to(device)\n all_labels = []\n for i, mbatch in enumerate(testloader):\n W, X, mask = mbatch\n W, X, mask = W.to(device), X.to(device), mask.to(device)\n P = net(W, X)\n labels = net.predict(P, mask)\n \n for label in labels:\n 
all_labels.append(label)\n \n return all_labels\n \n \n", "sub_path": "A1/LSTM-CRF/crf.py", "file_name": "crf.py", "file_ext": "py", "file_size_in_byte": 13880, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "tqdm.auto.tqdm", "line_number": 25, "usage_type": "call"}, {"api_name": "itertools.count", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 34, "usage_type": "call"}, {"api_name": "tqdm.auto.tqdm", "line_number": 41, "usage_type": "call"}, {"api_name": "itertools.count", "line_number": 51, "usage_type": "call"}, {"api_name": "tqdm.auto.tqdm", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.empty", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 78, "usage_type": "attribute"}, {"api_name": "torch.empty", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 79, "usage_type": "attribute"}, {"api_name": "torch.empty", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 80, "usage_type": "attribute"}, {"api_name": "tqdm.auto.tqdm", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 97, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 97, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 107, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 107, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 120, "usage_type": "name"}, {"api_name": "torch.randn", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 123, "usage_type": "call"}, {"api_name": "torch.empty", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 124, "usage_type": "attribute"}, {"api_name": "torch.empty", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 126, "usage_type": "attribute"}, {"api_name": "torch.empty", "line_number": 129, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 132, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 136, "usage_type": "call"}, {"api_name": "torch.log", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 138, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 138, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 143, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 144, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 144, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 144, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 145, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 145, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 146, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 146, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 159, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 165, "usage_type": "call"}, {"api_name": "torch.full", "line_number": 171, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 227, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 227, "usage_type": "attribute"}, 
{"api_name": "torch.utils.data.DataLoader", "line_number": 228, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 228, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 230, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 230, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 230, "usage_type": "call"}, {"api_name": "time.time", "line_number": 236, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 237, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 237, "usage_type": "name"}, {"api_name": "torch.optim.lr_scheduler.CosineAnnealingLR", "line_number": 238, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler", "line_number": 238, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 238, "usage_type": "name"}, {"api_name": "torch.nn.functional.one_hot", "line_number": 255, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 255, "usage_type": "attribute"}, {"api_name": "torch.full", "line_number": 272, "usage_type": "call"}, {"api_name": "torch.nn.utils.clip_grad_value_", "line_number": 286, "usage_type": "call"}, {"api_name": "torch.nn.utils", "line_number": 286, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 286, "usage_type": "name"}, {"api_name": "torch.nn.functional.one_hot", "line_number": 302, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 302, "usage_type": "attribute"}, {"api_name": "torch.full", "line_number": 317, "usage_type": "call"}, {"api_name": "time.time", "line_number": 336, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 354, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 361, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 361, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 363, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 363, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 363, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 364, "usage_type": "call"}]} +{"seq_id": "650668983", "text": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Open file with MV profiles consumption data\ndf = pd.read_excel('CCH.xlsx')\ndf['DataLectura'] = pd.to_datetime(df['DataLectura'], yearfirst=True)\ndf.index = df['DataLectura']\nreference_id = df['CUPS'].unique()\ndf['weekday'] = df['DataLectura'].dt.weekday\n\n# Sum up all the consumption for the reference weeks for each quarter\nconsumption_hour_march = np.asarray([0 for x in range(169)])\nconsumption_hour_june = np.asarray([0 for x in range(169)])\nconsumption_hour_september = np.asarray([0 for x in range(169)])\nconsumption_hour_december = np.asarray([0 for x in range(169)])\n\ntotal_id = len(reference_id)\nfor i in range(total_id):\n df1 = df.loc[df['CUPS'] == reference_id[i]]\n df_week_march = df1.loc['20180221230000':'20180228']\n df_week_june = df1.loc['201706132300':'20170620']\n df_week_september = df1.loc['201709112300':'20170918']\n df_week_december = df1.loc['201711042300':'20171111'] # get November instead to avoid holidays\n\n # Order from Monday to Sunday\n df2_march = df_week_march.loc[df_week_march['weekday'] == 0]\n df2_june = df_week_june.loc[df_week_june['weekday'] == 0]\n df2_september = df_week_september.loc[df_week_september['weekday'] == 0]\n df2_december = df_week_december.loc[df_week_december['weekday'] == 0]\n for j in 
range(1, 7):\n df3_march = df_week_march.loc[df_week_march['weekday'] == j]\n df3_june = df_week_june.loc[df_week_june['weekday'] == j]\n df3_september = df_week_september.loc[df_week_september['weekday'] == j]\n df3_december = df_week_december.loc[df_week_december['weekday'] == j]\n\n frame_march = [df2_march, df3_march]\n frame_june = [df2_june, df3_june]\n frame_september = [df2_september, df3_september]\n frame_december = [df2_december, df3_december]\n\n df2_march = pd.concat(frame_march)\n df2_june = pd.concat(frame_june)\n df2_september = pd.concat(frame_september)\n df2_december = pd.concat(frame_december)\n\n consumption_hour_march = df2_march['ActivaImport'] / 1000 + consumption_hour_march # in kW\n consumption_hour_june = df2_june['ActivaImport'] / 1000 + consumption_hour_june\n consumption_hour_september = df2_september['ActivaImport'] / 1000 + consumption_hour_september\n consumption_hour_december = df2_december['ActivaImport'] / 1000 + consumption_hour_december\n\n consumption_hour_march = np.asarray(consumption_hour_march)\n consumption_hour_june = np.asarray(consumption_hour_june)\n consumption_hour_september = np.asarray(consumption_hour_september)\n consumption_hour_december = np.asarray(consumption_hour_december)\n\n# Assume the reference week to be the selected one of the year 2016\nnew_index_march = pd.date_range('14/03/2016 0:15', periods=169, freq='H')\nnew_index_june = pd.date_range('06/06/2016 0:15', periods=169, freq='H')\nnew_index_september = pd.date_range('09/12/2016 0:15', periods=169, freq='H')\nnew_index_december = pd.date_range('12/05/2016 0:15', periods=169, freq='H')\n\nconsumption_hour_march = pd.Series(data=consumption_hour_march, index=new_index_march)\nconsumption_hour_june = pd.Series(data=consumption_hour_june, index=new_index_june)\nconsumption_hour_september = pd.Series(data=consumption_hour_september, index=new_index_september)\nconsumption_hour_december = pd.Series(data=consumption_hour_december, index=new_index_december)\n\n# get the energy each 15 min\npower_15min_march = consumption_hour_march.resample('15min').interpolate(method='linear') # still in kW\npower_15min_june = consumption_hour_june.resample('15min').interpolate(method='linear')\npower_15min_september = consumption_hour_september.resample('15min').interpolate(method='linear')\npower_15min_december = consumption_hour_december.resample('15min').interpolate(method='linear')\n\nconsumption_15min_march = power_15min_march * .25 # get the kWh\nconsumption_15min_june = power_15min_june * .25\nconsumption_15min_september = power_15min_september * .25\nconsumption_15min_december = power_15min_december * .25\n\nconsumption_15min_march.drop(consumption_15min_march.tail(1).index, inplace=True)\nconsumption_15min_june.drop(consumption_15min_june.tail(1).index, inplace=True)\nconsumption_15min_september.drop(consumption_15min_september.tail(1).index, inplace=True)\nconsumption_15min_december.drop(consumption_15min_december.tail(1).index, inplace=True)\n\n\ndf_print = pd.DataFrame({\n 'final consumption march': consumption_15min_march,\n 'final consumption june': consumption_15min_june,\n 'final consumption september': consumption_15min_september,\n 'final consumption december': consumption_15min_december\n})\ndf_print.to_csv('MV_demand.csv', index_label='time')\n\n\nplt.plot(consumption_hour_june)\nplt.plot(power_15min_june)\nplt.show()\n", "sub_path": "MV_consumption.py", "file_name": "MV_consumption.py", "file_ext": "py", "file_size_in_byte": 4809, "program_lang": "python", "lang": "en", 
"doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "pandas.read_excel", "line_number": 6, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 42, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 43, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 44, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 55, "usage_type": "call"}, {"api_name": "pandas.date_range", "line_number": 58, "usage_type": "call"}, {"api_name": "pandas.date_range", "line_number": 59, "usage_type": "call"}, {"api_name": "pandas.date_range", "line_number": 60, "usage_type": "call"}, {"api_name": "pandas.date_range", "line_number": 61, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 63, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 64, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 65, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 66, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}]} +{"seq_id": "620499438", "text": "import requests\nimport re\nfrom bs4 import BeautifulSoup\n\ndef get_law(url, headers):\n html = requests.get(url, headers = headers).content.decode('utf-8')\n soup = BeautifulSoup(html, 'html.parser')\n\n views = []\n view_list = soup.find_all('h2', 'headline')\n for view in view_list:\n view_title = view.find('a').get_text()\n view_a = view.find('a')\n view_link = view_a['href']\n views.append([view_title, view_link])\n\n news = []\n new_list = soup.find_all('p', 'pis-title')\n for new in new_list:\n new_title = new.find('a').get_text()\n new_a = new.find('a')\n new_link = new_a['href']\n news.append([new_title, new_link])\n \n yield{\n 'view': views,\n 'new': news\n }\n \n\ndef main():\n url = 'http://conflictoflaws.net'\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3722.400 QQBrowser/10.5.3738.400'\n }\n \n law = get_law(url, headers)\n for _ in law:\n print(_)\n\n\nif __name__ == '__main__':\n main()", "sub_path": "每日邮件(自写完结)/law.py", "file_name": "law.py", "file_ext": "py", "file_size_in_byte": 1125, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "requests.get", "line_number": 6, "usage_type": 
"call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "14301656", "text": "from skimage.measure import compare_ssim as ssim\nfrom skimage import io\nfrom skimage.viewer import ImageViewer\nimport threading\nimport pickle\nimport viewFilteredImages as vf\n\ndef compare_images(im1path, im2path, compare_func, size=(512, 144)):\n im1 = io.imread(fname=im1path)\n im2 = io.imread(fname=im2path)\n\n if (im1.size != size) or (im2.size != size):\n im1.resize(size)\n im2.resize(size)\n return compare_func(im1, im2)\n\n\ndef compare_image_to_group(im1path, grp, compare_func):\n comp_dic = {} # type: dict\n for img in grp:\n comp_dic[img] = compare_images(im1path, img, compare_func)\n return comp_dic\n\n\ndef dict_sum(dictionary):\n _sum = 0 # type: float\n for key in dictionary:\n _sum = float(dictionary[key]) + _sum\n return _sum\n\n\ndef show_image_from_path(image_path):\n image = io.imread(image_path)\n viewer = ImageViewer(image)\n viewer.show()\n\n\nclass CompareImageToGroupThread(threading.Thread):\n def __init__(self, thread_id, name, im, target_group, comp_func):\n threading.Thread.__init__(self)\n self.thread_id = thread_id\n self.name = name\n self.im = im\n self.target_group = target_group\n self.comp_func = comp_func\n self.comp_dic = 0\n\n def run(self):\n threadLimiter.acquire()\n try:\n print(\"Starting \" + self.name + \"\\n\")\n self.comp_dic = compare_image_to_group(self.im, self.target_group, self.comp_func)\n print(\"Exiting \" + self.name + \"\\n\")\n dicSums[self.im] = dict_sum(self.comp_dic)\n #print(\"\\n-----------------------------------\\n\")\n with open(sys.argv[1] + '/images.dictionary', 'wb+') as dicSumsFile:\n pickle.dump(dicSums, dicSumsFile)\n finally:\n threadLimiter.release()\n\n\ndef compare_group_to_group(group1, target_group, compare_func):\n i = 0\n for im in group1:\n CompareImageToGroupThread(i, im, im, target_group, compare_func).start()\n i = i + 1\n\n\nif __name__ == '__main__':\n import sys\n import os\n import glob\n\n maximumNumberOfThreads = 20\n threadLimiter = threading.BoundedSemaphore(maximumNumberOfThreads)\n dicSums = {}\n imagesToCompare = glob.glob(sys.argv[1] + '/*.jpg')\n try:\n dicSums = vf.get_pickle(sys.argv[1]+'/images.dictionary')\n filteredImages = set(dicSums.keys())\n imagesToCompare = list(set(imagesToCompare) - filteredImages)\n except Exception :\n print(Exception)\n print('[!] 
file doesn\\'t exist : a new one will be created!')\n\n groupOfImages = list(map(lambda x: os.path.join(sys.argv[2], x), os.listdir(sys.argv[2])))\n print('\\n[!]filtering patient : ',sys.argv[1])\n compare_group_to_group(imagesToCompare, groupOfImages, ssim)", "sub_path": "tools/imagetools/filterImages.py", "file_name": "filterImages.py", "file_ext": "py", "file_size_in_byte": 2786, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "skimage.io.imread", "line_number": 9, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 9, "usage_type": "name"}, {"api_name": "skimage.io.imread", "line_number": 10, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 10, "usage_type": "name"}, {"api_name": "skimage.io.imread", "line_number": 33, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 33, "usage_type": "name"}, {"api_name": "skimage.viewer.ImageViewer", "line_number": 34, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 38, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 40, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 57, "usage_type": "call"}, {"api_name": "threading.BoundedSemaphore", "line_number": 75, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 77, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 77, "usage_type": "attribute"}, {"api_name": "viewFilteredImages.get_pickle", "line_number": 79, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 79, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 86, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 86, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 87, "usage_type": "attribute"}, {"api_name": "skimage.measure.compare_ssim", "line_number": 88, "usage_type": "argument"}]} +{"seq_id": "591749989", "text": "# -*- coding: utf-8 -*-\nimport urllib.request, urllib.error\nimport datetime\nfrom bs4 import BeautifulSoup\n\nclass SAXO:\n\n NAME = 'SAXOBANK'\n targetList = []\n kijunbi = None\n posList = {}\n soup = None\n index = {\n \"PLN/JPY\":\"106\",\n \"TRY/JPY\":\"114\",\n \"ZAR/JPY\":\"147\",\n }\n tables = {}\n\n def __init__(self, targetList,diff_day):\n\n try:\n self.targetList = targetList\n self.kijunbi = datetime.datetime.today() - datetime.timedelta(days=diff_day)\n yyyy = \"{0:%Y}\".format(self.kijunbi) \n m = \"{}\".format(self.kijunbi.month)\n for targetPair in self.targetList:\n url = \"http://saxobank.co.jp/swaplist/\" + self.index[targetPair] + \"/\" + yyyy + \"/\" + m\n html = urllib.request.urlopen(url)\n # BeautifulSoupで扱えるようにパースします\n self.soup = BeautifulSoup(html, \"html.parser\")\n table = self.soup.find(\"table\",id=\"tbl-swap-points\")\n self.tables[targetPair] = table\n except:\n pass\n\n\n def getSwapList(self):\n returnPrice = {}\n try:\n nowTime = \"{0:%m月%d日}\".format(self.kijunbi) \n for targetPair in self.targetList:\n tbody = self.tables[targetPair].find(\"tbody\")\n trs = tbody.find_all(\"tr\")\n bid_swap = '-'\n ask_swap = '-'\n for tr in trs:\n # 対象日の行\n if(nowTime in tr.find_all(\"td\")[0].text):\n # 売SWAP取得\n bid_swap = tr.find_all(\"td\")[1].text\n # 買SWAP取得\n ask_swap = tr.find_all(\"td\")[2].text\n 
returnPrice[targetPair] = {\"bid_swap\" : bid_swap,\"ask_swap\":ask_swap}\n except:\n pass\n return returnPrice\n\n def getSwap(self, target):\n bid_swap = '-'\n ask_swap = '-'\n returnPrice = {}\n returnPrice[target] = {\"bid_swap\":bid_swap,\"ask_swap\":ask_swap}\n try:\n nowTime = \"{0:%m月%d日}\".format(self.kijunbi) \n if target not in self.tables:return returnPrice\n tbody = self.tables[target].find(\"tbody\")\n trs = tbody.find_all(\"tr\")\n bid_swap = '-'\n ask_swap = '-'\n for tr in trs:\n # 対象日の行\n if(nowTime in tr.find_all(\"td\")[0].text):\n # 売SWAP取得\n bid_swap = tr.find_all(\"td\")[1].text\n # 買SWAP取得\n ask_swap = tr.find_all(\"td\")[2].text\n returnPrice[target] = {\"bid_swap\" : bid_swap,\"ask_swap\":ask_swap}\n except:\n pass\n return returnPrice\n\n\nif __name__ == '__main__':\n saxo = SAXO(['PLN/JPY','TRY/JPY'],diff_day=2)\n print(\"======================================\")\n print(saxo.getSwapList())\n print(saxo.getSwap('PLN/JPY'))\n print(\"======================================\")\n\n", "sub_path": "exchange/saxo.py", "file_name": "saxo.py", "file_ext": "py", "file_size_in_byte": 3068, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "datetime.datetime.today", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 24, "usage_type": "call"}, {"api_name": "urllib.request.request.urlopen", "line_number": 29, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 29, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 29, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "435429560", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 30 15:58:28 2019\n\n@author: Bahij\n\"\"\"\n\n#data preprocessing\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndataset = pd.read_csv('Churn_Modelling.csv')\nX = dataset.iloc[:, 3:13].values\ny = dataset.iloc[:, 13].values\n\n# Encoding categorical data\nfrom sklearn.preprocessing import LabelEncoder\nlabelencoder_X_1 = LabelEncoder()\nX[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])\nlabelencoder_X_2 = LabelEncoder()\nX[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\n# Value Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n#The ANN\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\n\nmodel = Sequential()\n\n#add input and first hidden layer with dropout\nmodel.add(Dense(activation = 'relu', units = 6, kernel_initializer = 'uniform', input_dim = 10))\nmodel.add(Dropout(rate = 0.1))\n# add second hidden layer\nmodel.add(Dense(activation = 'relu', units = 6, kernel_initializer = 'uniform'))\nmodel.add(Dropout(rate = 0.1))\n#add output layer\nmodel.add(Dense(activation = 'sigmoid', units = 1, kernel_initializer = 'uniform'))\n\n#compile the ann\nmodel.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n\n# fit ann to training set\nmodel.fit(X_train, y_train, batch_size = 40, epochs = 80)\n\n#part 3\n# Predicting the Test set results\ny_pred = model.predict(X_test)\ny_pred = 
(y_pred > 0.5)\n\n# Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\n\nnewpred = model.predict(sc.transform(np.array([[600.0,0,1,40,3,60000,2,1,1,50000]])))\nnewpred = (newpred > 0.5)\nprint(newpred)\n", "sub_path": "DeepLearning/ArtificialNeuralNetwork/ann.py", "file_name": "ann.py", "file_ext": "py", "file_size_in_byte": 1911, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "pandas.read_csv", "line_number": 13, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 19, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 21, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 25, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 29, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 39, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 42, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 43, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 45, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 46, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 48, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "343792964", "text": "import nnets.hidden_layer as nhl\n\nimport theano\nimport theano.tensor as T\n\nimport numpy\nfrom theano.tensor.shared_randomstreams import RandomStreams\n\n\nclass rbm_layer:\n\n    def __init__(self,x,n_visible,n_hidden,rng,\n                 W = None,hbias = None,vbias = None,theano_rng = None):\n        if theano_rng is None:theano_rng = RandomStreams(rng.randint(2**30))\n        self.theano_rng = theano_rng\n\n        #sigmoid_layer = nhl.hidden_layer(x,n_in,n_hidden,rng,'sigm')\n        if W is None:\n            Warray = numpy.asarray(rng.uniform(\n                low=-4 * numpy.sqrt(6. / (n_hidden + n_visible)),\n                high=4 * numpy.sqrt(6. 
/ (n_hidden + n_visible)),\n                size=(n_visible,n_hidden)),dtype=theano.config.floatX)\n            W = theano.shared(value = Warray,name = 'W',borrow = True)\n        if hbias is None:\n            hbiasarray = numpy.zeros(n_hidden,dtype = theano.config.floatX)\n            hbias = theano.shared(value = hbiasarray,name = 'hbias',borrow = True)\n        if vbias is None:\n            vbiasarray = numpy.zeros(n_visible,dtype = theano.config.floatX)\n            vbias = theano.shared(value = vbiasarray,name = 'vbias',borrow = True)\n\n        #self.n_in = n_in\n        #self.n_out = n_out\n        self.n_visible = n_visible\n        self.n_hidden = n_hidden\n        self.x = x\n        #self.y = sigmoid_layer.y\n        self.params = [W,hbias,vbias]\n        self.W = W\n        self.hbias = hbias\n        self.vbias = vbias\n\n\n    # propagate the visible units activation upwards to the hidden units\n    def propup(self,vis):\n        pre_sigmoid_activation = T.dot(vis,self.W) + self.hbias\n        return pre_sigmoid_activation,T.nnet.sigmoid(pre_sigmoid_activation)\n\n\n    # propagates the hidden units activation downwards to the visible units\n    def propdown(self,hid):\n        pre_sigmoid_activation = T.dot(hid,self.W.T) + self.vbias\n        return pre_sigmoid_activation,T.nnet.sigmoid(pre_sigmoid_activation)\n\n\n    # infer state of hidden units given visible units \n    def sample_h_given_v(self,v0_sample):\n        # compute the activation of the hidden units given a sample of the visibles\n        pre_sigmoid_h1,h1_mean = self.propup(v0_sample)\n        h1_sample = self.theano_rng.binomial(\n            size = h1_mean.shape,n = 1,p = h1_mean,dtype = theano.config.floatX)\n        return pre_sigmoid_h1,h1_mean,h1_sample\n\n\n    # infers state of visible units given hidden units \n    def sample_v_given_h(self,h0_sample):\n        # compute the activation of the visible given the hidden sample\n        pre_sigmoid_v1,v1_mean = self.propdown(h0_sample)\n        v1_sample = self.theano_rng.binomial(\n            size = v1_mean.shape,n = 1,p = v1_mean,dtype = theano.config.floatX)\n        return pre_sigmoid_v1,v1_mean,v1_sample\n\n\n    # implements one step of Gibbs sampling, starting from the hidden state\n    def gibbs_hvh(self,h0_sample):\n        pre_sigmoid_v1,v1_mean,v1_sample = self.sample_v_given_h(h0_sample)\n        pre_sigmoid_h1,h1_mean,h1_sample = self.sample_h_given_v(v1_sample)\n        return pre_sigmoid_v1,v1_mean,v1_sample,pre_sigmoid_h1,h1_mean,h1_sample\n\n\n    # implements one step of Gibbs sampling, starting from the visible state\n    def gibbs_vhv(self, v0_sample):\n        pre_sigmoid_h1,h1_mean,h1_sample = self.sample_h_given_v(v0_sample)\n        pre_sigmoid_v1,v1_mean,v1_sample = self.sample_v_given_h(h1_sample)\n        return pre_sigmoid_h1,h1_mean,h1_sample,pre_sigmoid_v1,v1_mean,v1_sample\n\n\n    # compute the free energy \n    def free_energy(self,v_sample):\n        wx_b = T.dot(v_sample,self.W) + self.hbias\n        vbias_term = T.dot(v_sample,self.vbias)\n        hidden_term = T.sum(T.log(1 + T.exp(wx_b)),axis = 1)\n        return -hidden_term - vbias_term\n\n\n    # stochastic approximation to the pseudo-likelihood\n    def get_pseudo_likelihood_cost(self, updates):\n        # index of bit i in expression p(x_i | x_{\\i})\n        bit_i_idx = theano.shared(value = 0,name = 'bit_i_idx')\n        # binarize the input image by rounding to nearest integer\n        xi = T.round(self.x)\n        # calculate free energy for the given bit configuration\n        fe_xi = self.free_energy(xi)\n        # flip bit x_i of matrix xi and preserve all other bits x_{\\i}\n        # Equivalent to xi[:,bit_i_idx] = 1-xi[:, bit_i_idx], \n        # but assigns the result to xi_flip, instead of working in place on xi.\n        xi_flip = T.set_subtensor(xi[:,bit_i_idx],1 - xi[:,bit_i_idx])\n        # calculate free energy with bit flipped\n        fe_xi_flip = self.free_energy(xi_flip)\n        # equivalent to 
e^(-FE(x_i)) / (e^(-FE(x_i)) + e^(-FE(x_{\\i})))\n cost = T.mean(self.n_visible*T.log(T.nnet.sigmoid(fe_xi_flip - fe_xi)))\n # increment bit_i_idx % number as part of updates\n updates[bit_i_idx] = (bit_i_idx + 1) % self.n_visible\n return cost\n\n\n # approximation to the reconstruction error\n def get_reconstruction_cost(self,updates,pre_sigmoid_nv):\n cross_entropy = T.mean(T.sum(\n self.x*T.log(T.nnet.sigmoid(pre_sigmoid_nv)) +\n (1 - self.x)*T.log(1 - T.nnet.sigmoid(pre_sigmoid_nv)),axis = 1))\n return cross_entropy\n\n\n # implement one step of CD-k or PCD-k\n # persistent: None for CD. For PCD, shared variable containing old state of Gibbs chain. \n # This must be a shared variable of size (batch size, number of hidden units).\n # k: number of Gibbs steps to do in CD-k/PCD-k\n def get_cost_updates(self,lr = 0.1,persistent = None,k = 1):\n # compute positive phase\n pre_sigmoid_ph,ph_mean,ph_sample = self.sample_h_given_v(self.x)\n # decide how to initialize persistent chain:\n # for CD, we use the newly generate hidden sample\n # for PCD, we initialize from the old state of the chain\n if persistent is None:chain_start = ph_sample\n else:chain_start = persistent\n # perform actual negative phase\n # in order to implement CD-k/PCD-k we need to scan over the\n # function that implements one gibbs step k times.\n # scan will return the entire Gibbs chain\n ([pre_sigmoid_nvs,nv_means,nv_samples,\n pre_sigmoid_nhs,nh_means,nh_samples],updates) = theano.scan(\n self.gibbs_hvh,n_steps = k,name = 'gibbs_hvh',\n outputs_info = [None,None,None,None,None,chain_start])\n # determine gradients on RBM parameters\n # NOTE: that we only need the sample at the end of the chain\n chain_end = nv_samples[-1]\n cost = T.mean(self.free_energy(self.x)) - T.mean(self.free_energy(chain_end))\n gparams = T.grad(cost,self.params,consider_constant = [chain_end])\n for gparam, param in zip(gparams, self.params):\n updates[param] = param - gparam*T.cast(lr,dtype = theano.config.floatX)\n # pseudo-likelihood is a better proxy for PCD\n # reconstruction cross-entropy is a better proxy for CD\n if persistent:\n # NOTE: that this works only if persistent is a shared variable\n updates[persistent] = nh_samples[-1]\n monitoring_cost = self.get_pseudo_likelihood_cost(updates)\n else:monitoring_cost = self.get_reconstruction_cost(updates,pre_sigmoid_nvs[-1])\n return monitoring_cost,updates\n\n\n\n\n", "sub_path": "src/nnets/rbm_layer.py", "file_name": "rbm_layer.py", "file_ext": "py", "file_size_in_byte": 7201, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "numpy.asarray", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 21, "usage_type": "call"}, {"api_name": "theano.config", "line_number": 22, "usage_type": "attribute"}, {"api_name": "theano.shared", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 25, "usage_type": "call"}, {"api_name": "theano.config", "line_number": 25, "usage_type": "attribute"}, {"api_name": "theano.shared", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 28, "usage_type": "call"}, {"api_name": "theano.config", "line_number": 28, "usage_type": "attribute"}, {"api_name": "theano.shared", "line_number": 29, "usage_type": "call"}, {"api_name": "theano.tensor.dot", "line_number": 45, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 
45, "usage_type": "name"}, {"api_name": "theano.tensor.nnet.sigmoid", "line_number": 46, "usage_type": "call"}, {"api_name": "theano.tensor.nnet", "line_number": 46, "usage_type": "attribute"}, {"api_name": "theano.tensor", "line_number": 46, "usage_type": "name"}, {"api_name": "theano.tensor.dot", "line_number": 51, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 51, "usage_type": "name"}, {"api_name": "theano.tensor.nnet.sigmoid", "line_number": 52, "usage_type": "call"}, {"api_name": "theano.tensor.nnet", "line_number": 52, "usage_type": "attribute"}, {"api_name": "theano.tensor", "line_number": 52, "usage_type": "name"}, {"api_name": "theano.config", "line_number": 60, "usage_type": "attribute"}, {"api_name": "theano.config", "line_number": 69, "usage_type": "attribute"}, {"api_name": "theano.tensor.dot", "line_number": 89, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 89, "usage_type": "name"}, {"api_name": "theano.tensor.dot", "line_number": 90, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 90, "usage_type": "name"}, {"api_name": "theano.tensor.sum", "line_number": 91, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 91, "usage_type": "name"}, {"api_name": "theano.tensor.log", "line_number": 91, "usage_type": "call"}, {"api_name": "theano.tensor.exp", "line_number": 91, "usage_type": "call"}, {"api_name": "theano.shared", "line_number": 98, "usage_type": "call"}, {"api_name": "theano.tensor.round", "line_number": 100, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 100, "usage_type": "name"}, {"api_name": "theano.tensor.set_subtensor", "line_number": 106, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 106, "usage_type": "name"}, {"api_name": "theano.tensor.mean", "line_number": 110, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 110, "usage_type": "name"}, {"api_name": "theano.tensor.log", "line_number": 110, "usage_type": "call"}, {"api_name": "theano.tensor.nnet.sigmoid", "line_number": 110, "usage_type": "call"}, {"api_name": "theano.tensor.nnet", "line_number": 110, "usage_type": "attribute"}, {"api_name": "theano.tensor.mean", "line_number": 118, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 118, "usage_type": "name"}, {"api_name": "theano.tensor.sum", "line_number": 118, "usage_type": "call"}, {"api_name": "theano.tensor.log", "line_number": 119, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 119, "usage_type": "name"}, {"api_name": "theano.tensor.nnet.sigmoid", "line_number": 119, "usage_type": "call"}, {"api_name": "theano.tensor.nnet", "line_number": 119, "usage_type": "attribute"}, {"api_name": "theano.tensor.log", "line_number": 120, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 120, "usage_type": "name"}, {"api_name": "theano.tensor.nnet.sigmoid", "line_number": 120, "usage_type": "call"}, {"api_name": "theano.tensor.nnet", "line_number": 120, "usage_type": "attribute"}, {"api_name": "theano.scan", "line_number": 141, "usage_type": "call"}, {"api_name": "theano.tensor.mean", "line_number": 147, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 147, "usage_type": "name"}, {"api_name": "theano.tensor.grad", "line_number": 148, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 148, "usage_type": "name"}, {"api_name": "theano.tensor.cast", "line_number": 150, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 150, 
"usage_type": "name"}, {"api_name": "theano.config", "line_number": 150, "usage_type": "attribute"}]} +{"seq_id": "509291108", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*- vim60:fdm=marker\n#\n# Copyright: 2016, Maximiliano Curia \n#\n# License: ISC\n# Permission to use, copy, modify, and/or distribute this software for any\n# purpose with or without fee is hereby granted, provided that the above\n# copyright notice and this permission notice appear in all copies.\n# .\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND ISC DISCLAIMS ALL WARRANTIES WITH\n# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\n# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,\n# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\n# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE\n# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\n# PERFORMANCE OF THIS SOFTWARE.\n\n''' Miscelaneous data types '''\n\nimport re\n\nfrom collections import namedtuple\n\nReSub = namedtuple('ReSub', ('re', 'repl'))\nUNKNOWN = 'Unknown'\nUNKNOWN_COPYRIGHTED = 'UnknownCopyrighted'\n\n\nclass License(object):\n\n licenses = {}\n\n def __init__(self, name):\n self.name = name\n self.stored = None\n\n def __str__(self):\n if self.stored:\n return self.stored.dump().rstrip('\\n')\n return ('License: {name}\\n'\n 'Comment: Add the corresponding license text here'.format(\n name=self.name))\n\n @staticmethod\n def get(name):\n if name not in License.licenses:\n License.licenses[name] = License(name)\n\n return License.licenses[name]\n\n\nclass YearRange(object):\n\n def __init__(self, low=0, high=0):\n low_value = int(low)\n high_value = int(high)\n if low_value > high_value:\n low_value, high_value = high_value, low_value\n self.low = low_value\n self.high = high_value\n\n def __in__(self, year):\n value = int(year)\n return self.low <= value <= self.high\n\n def add(self, year):\n value = int(year)\n if not value:\n return\n if not self.low or value < self.low:\n self.low = value\n if not self.high or self.high < value:\n self.high = value\n return self\n\n def newer(self, other):\n if self.high and other.high:\n return other.high > self.high\n return not self.high and other.high\n\n def merge(self, other):\n self.add(other.low)\n self.add(other.high)\n return self\n\n def __str__(self):\n if not self.low:\n return ''\n if self.low == self.high:\n return str(self.low)\n return str(self.low) + '-' + str(self.high)\n\n\nclass CopyrightHolder(object):\n\n year_re = re.compile(r'\\s*(?:[\\s:([]*)?(?P\\d{2,})[]:\\s]*'\n r'(?:[-~=–—][\\s:[]*(?P\\d{1,})[]:\\s]*)?[,/)]*')\n holder_re = re.compile(r'\\s*(?:by\\s*)?(?P\\S.*?\\S)[\\s\"\\*,;/]*$', re.I)\n\n name_email_re = re.compile(\n r'(?P\\S.*?\\S)?(?(name)(?:\\s|(?=[<(])|$)|)\\s*'\n r'[<(/\\\\]*(?P[^\\s<>]+?@[^\\s<>@]+?)?(?(email)[)<>/\\\\]*|)$')\n\n email_subs = [\n ReSub(re.compile(r''), r''),\n ReSub(re.compile(r'%20'), r' '),\n ReSub(re.compile(r'<?'), r'<'),\n ReSub(re.compile(r'>?'), r'>'),\n ReSub(re.compile(r'@'), r'@'),\n ReSub(re.compile(r'“?'), r'\"'),\n ReSub(re.compile(r'\\(c\\)$', re.I), r''),\n # Expensive fix for \") at the end of the string\n ReSub(re.compile(r'((?P\\()?(?(paren).*?|))(?(paren)|\\)+)?$'), r'\\1'),\n ReSub(re.compile(r'\\s+\\(?(where|at|@)\\)?\\s+', re.I), r'@'),\n ReSub(re.compile(r'\\(at\\)', re.I), r'@'),\n ReSub(re.compile(r'\\s+\\(?do?[tm]\\)?\\s+', re.I), r'.'),\n # Ugly fix for >mail@example.com<\n 
ReSub(re.compile(r'(?:^|(?<=\\s))\\s*\\>\\s*(?=\\w(?:\\w|[.-])*@)'), r'<'),\n ReSub(re.compile(r'\\<\\s*$'), r'>'),\n ReSub(re.compile(r'(?:^|(?<=\\s))\\s*((?!\\<)\\w(?:\\w|[.-])*@'\n r'?:\\w(?:\\w|-)+(?:\\.\\w(?:\\w|-)+)+(?))\\s*(?:(?=\\s)|$)'),\n r'<\\1>'),\n ]\n\n name_cruft_subs = [\n ReSub(re.compile(r'', re.IGNORECASE), r''),\n ReSub(re.compile(r'^>', re.IGNORECASE), r''),\n ReSub(re.compile(r'<$', re.IGNORECASE), r''),\n ReSub(re.compile(r'\\\\[nt]$', re.IGNORECASE), r''),\n ReSub(re.compile(r'^\\(\\s*c\\s*\\)\\s*', re.IGNORECASE), r''),\n ]\n\n def __init__(self, name, email, years):\n self.name = name\n self.email = email\n self.years = years\n\n def merge(self, other):\n if other.name and self.years.newer(other.years):\n self.name = other.name\n self.years.merge(other.years)\n return self\n\n @property\n def person(self):\n result = self.name\n if self.name and self.email:\n result += ' '\n if self.email:\n result += '<{}>'.format(self.email)\n return result\n\n def __str__(self):\n result = str(self.years)\n result += ', ' if result else ''\n result += self.person\n return result\n\n def __repr__(self):\n return str(self)\n\n @staticmethod\n def _get_year(text):\n year = int(text)\n if year < 50:\n year += 2000\n if year < 100:\n year += 1900\n return year\n\n @staticmethod\n def get_name_email(text):\n # De-cruft email\n for sub in CopyrightHolder.email_subs:\n text = sub.re.sub(sub.repl, text)\n\n match = CopyrightHolder.name_email_re.match(text)\n if not match:\n return None, None\n match_dict = match.groupdict()\n name = match_dict.get('name', '')\n if name is None:\n name = ''\n name = name.strip(r''',.;*'\"@-–—[]{} ''')\n for sub in CopyrightHolder.name_cruft_subs:\n name = sub.re.sub(sub.repl, name)\n email = match_dict.get('email', None)\n return name, email\n\n @staticmethod\n def from_copyright(copyright_):\n\n def get_years(text, years):\n start = len(text)\n end = 0\n year_match = CopyrightHolder.year_re.search(text)\n while year_match:\n match_dict = year_match.groupdict()\n low = CopyrightHolder._get_year(match_dict['lo'])\n years.add(low)\n if match_dict.get('hi', None):\n high = CopyrightHolder._get_year(match_dict['hi'])\n if high < low:\n # 2001-4 -> '200' + '4'\n # new_high = \\\n # match_dict['lo'][:- len(match_dict['hi'])] + \\\n # match_dict['hi']\n high = CopyrightHolder._get_year(match_dict['hi'])\n years.add(high)\n if start > year_match.start(0):\n start = year_match.start(0)\n end = year_match.end(0)\n year_match = CopyrightHolder.year_re.match(text, end)\n return start, end\n\n years = YearRange()\n start, end = get_years(copyright_, years)\n if start < end:\n copyright_ = copyright_[:start] + copyright_[end:]\n\n match = CopyrightHolder.holder_re.match(copyright_)\n if match:\n holder = match.group('holder')\n name, email = CopyrightHolder.get_name_email(holder)\n if not name and not email:\n return\n return CopyrightHolder(name, email, years)\n", "sub_path": "decopy/datatypes.py", "file_name": "datatypes.py", "file_ext": "py", "file_size_in_byte": 7539, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "collections.namedtuple", "line_number": 25, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 97, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 99, "usage_type": "call"}, {"api_name": "re.I", "line_number": 99, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 101, "usage_type": "call"}, {"api_name": "re.compile", 
"line_number": 106, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 107, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 108, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 109, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 110, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 111, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 112, "usage_type": "call"}, {"api_name": "re.I", "line_number": 112, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 114, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 115, "usage_type": "call"}, {"api_name": "re.I", "line_number": 115, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 116, "usage_type": "call"}, {"api_name": "re.I", "line_number": 116, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 117, "usage_type": "call"}, {"api_name": "re.I", "line_number": 117, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 119, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 120, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 121, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 127, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 127, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 128, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 128, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 129, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 129, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 130, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 130, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 131, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 131, "usage_type": "attribute"}]} +{"seq_id": "289275204", "text": "import os\nimport json\nimport shutil\n\ndel_extension = {\n\t'.tmp': '临时文件',\n\t'._mp': '临时文件_mp',\n\t'.log': '日志文件',\n\t'.gid': '临时帮助文件',\n\t'.chk': '磁盘检查文件',\n\t'.old': '临时备份文件',\n\t'.xlk': 'Excel备份文件',\n\t'.bak': '临时备份文件bak'\n}\ndel_userprofile = ['cookies', 'recent', 'Temporary Internet Files', 'Temp']\ndel_windir = ['prefetch', 'temp']\n\n# 获取系统盘\nSYS_DRIVE = os.environ['systemdrive'] + '\\\\'\n# 获取用户目录\nUSER_PROFILE = os.environ['userprofile']\n# 获取 Windows 目录\nWIN_DIR = os.environ['windir']\n\n# 获取当前路径 os.getcwd() 'E:\\\\Software\\\\Python27'\n# 跳转至指定的文件目录 os.chdir('d://wamp')\n# 获取系统盘符 os.environ['systemdrive'] 'C:'\n# 获取用户目录 os.environ['userprofile'] 'C:\\\\Users\\\\Administrator'\n# 获取 Windows 目录 os.environ['windir'] 'C:\\\\Windows'\n\n# 删除文件或文件夹\ndef del_dir_or_file(root):\n\ttry:\n\t\tif os.path.isfile(root):\n\t\t\t# 删除文件\n\t\t\tos.remove(root)\n\t\t\tprint('file: ' + root + ' removed\\n')\n\t\telif os.path.isdir(root):\n\t\t\t# 删除文件夹\n\t\t\tshutil.rmtree(root)\n\t\t\tprint('directory: ' + root + ' removed\\n')\n\texcept WindowsError:\n\t\tprint('failure: ' + root + \" can't remove\\n\")\n\n# 字节bytes转化kb\\m\\g\ndef formatSize(bytes):\n\ttry:\n\t\tbytes = float(bytes)\n\t\tkb = bytes / 1024\n\texcept:\n\t\tprint('The incoming byte format is not correct!')\n\t\treturn \"Error\"\n\tif kb >= 1024:\n\t\tM = kb / 1024\n\t\tif M >= 1024:\n\t\t\tG = M / 1024\n\t\t\treturn \"%fG\" % (G)\n\t\telse:\n\t\t\treturn \"%fM\" % (M)\n\telse:\n\t\treturn \"%fkb\" % (kb)\n\nclass DiskClean(object):\n\tdef 
__init__(self):\n\t\tself.del_info = {}\n\t\tself.del_file_paths = []\n\t\tself.total_size = 0\n\t\tfor k,v in del_extension.items():\n\t\t\tself.del_info[k] = dict(name = v, count = 0)\n\n\tdef scan(self):\n\t\tfor roots, dirs, files in os.walk(USER_PROFILE, topdown=False):\n\t\t\t# 生成并展开以 root 为根目录的目录树,参数 topdown 设定展开方式从底层到顶层\n\t\t\tfor file_item in files:\n\t\t\t\t# 获取扩展名\n\t\t\t\tfile_extension = os.path.splitext(file_item)[1]\n\t\t\t\t# print os.path.join(roots, file_item)\n\t\t\t\tif file_extension in self.del_info:\n\t\t\t\t\t# 文件完整路径\n\t\t\t\t\tfile_full_path = os.path.join(roots, file_item)\n\t\t\t\t\tself.del_file_paths.append(file_full_path)\n\t\t\t\t\tself.del_info[file_extension]['count'] += 1\n\t\t\t\t\tself.total_size += os.path.getsize(file_full_path)\n\n\tdef show(self):\n\t\tprint(json.dumps(self.del_info, indent=4, ensure_ascii=False))\n\t\tprint('删除可节省:%s 空间' % formatSize(self.total_size))\n\n\tdef delete_files(self):\n\t\tfor i in self.del_file_paths:\n\t\t\tdel_dir_or_file(i)\n\nif __name__ == '__main__':\n\tcleaner = DiskClean()\n\tcleaner.scan()\n\tcleaner.show()\n\tif_del = input('是否删除y/n:')\n\tif if_del == 'y':\n\t\tcleaner.delete_files()\n", "sub_path": "DiskClean.py", "file_name": "DiskClean.py", "file_ext": "py", "file_size_in_byte": 2759, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "os.environ", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 40, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "os.path.getsize", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 86, "usage_type": "call"}]} +{"seq_id": "470096506", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('musicworld', '0004_article_likes'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Periodical',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=200)),\n ('content', models.TextField(max_length=1000)),\n ('image', models.FileField(verbose_name='periodical-image', upload_to='image/periodical')),\n ('timestamp', models.DateTimeField(auto_now=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Ptype',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=200)),\n ],\n 
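The scan-then-confirm-then-delete flow above reduces to a single os.walk pass that buckets files by extension and totals their size before anything is removed. A condensed, cross-platform sketch of that pass (the extension set and scan root here are illustrative):

import os

TARGET_EXTENSIONS = {'.tmp', '.log', '.bak'}  # illustrative subset

def scan(root):
    # Return ({extension: count}, total_bytes) for matching files under root.
    counts, total = {}, 0
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in filenames:
            ext = os.path.splitext(name)[1].lower()
            if ext in TARGET_EXTENSIONS:
                try:
                    total += os.path.getsize(os.path.join(dirpath, name))
                except OSError:
                    continue  # unreadable or vanished file: skip it
                counts[ext] = counts.get(ext, 0) + 1
    return counts, total

counts, total = scan(os.path.expanduser('~'))
print(counts, '%.1f MB reclaimable' % (total / 1024 / 1024))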
options={\n            },\n            bases=(models.Model,),\n        ),\n        migrations.AddField(\n            model_name='periodical',\n            name='ptype',\n            field=models.ForeignKey(to='musicworld.Ptype'),\n            preserve_default=True,\n        ),\n    ]\n", "sub_path": "musicworld/musicworld/migrations/0005_auto_20150104_1609.py", "file_name": "0005_auto_20150104_1609.py", "file_ext": "py", "file_size_in_byte": 1394, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.FileField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 31, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 31, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 35, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 35, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 37, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 37, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 40, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 40, "usage_type": "name"}]} +{"seq_id": "509719627", "text": "\nfrom mpl_toolkits.mplot3d import axes3d\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport numpy as np\n\nfile_path = \"./3D_surface_and_contour.jpg\"\np = 0.05\nf = -0.01\n\ndef get_data(p):\n    x, y, z = axes3d.get_test_data(p)\n    print(z)\n    z = f * z\n    return x, y, z\n\ndef plot_3d_contour(p, f):\n    nrows = 4\n    ncols = 5\n\n    x, y, z = get_data(p)\n\n    x_min, x_max = np.min(x), np.max(x)\n    y_min, y_max = np.min(y), np.max(y)\n    z_min, z_max = np.min(z), np.max(z)\n\n    fig = plt.figure(figsize=(15, 10))\n    for n in range(nrows * ncols):\n        i = n % ncols\n        j = n // ncols\n        k = n + 1\n        if j == 0:\n            azim = -60 + (i - 2) * 15\n            elev = 30\n        elif j == 1:\n            azim = -60\n            elev = 30 + (i - 2) * 5\n        elif j == 2:\n            azim = 60 + (i - 2) * 10\n            elev = 30\n        
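For orientation, a migration like the one above is what makemigrations emits for models of roughly this shape; the following is a reconstruction from the CreateModel/AddField operations, not the project's actual models.py (recent Django would additionally require an on_delete argument on the ForeignKey):

from django.db import models

class Ptype(models.Model):
    name = models.CharField(max_length=200)

class Periodical(models.Model):
    name = models.CharField(max_length=200)
    content = models.TextField(max_length=1000)
    image = models.FileField(verbose_name='periodical-image', upload_to='image/periodical')
    timestamp = models.DateTimeField(auto_now=True)
    ptype = models.ForeignKey('Ptype')  # added by the AddField operation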
elif j == 3:\n azim = 60\n elev = 30 + (i - 2) * 5\n ax = fig.add_subplot(nrows, ncols, k, projection='3d')\n ax.set_title(\"azim=\" + str(azim) + \" elev=\" + str(elev))\n ax.tick_params(labelsize=8)\n ax.view_init(azim=azim, elev=elev)\n ax.plot_surface(x, y, z, rstride=10, cstride=10, alpha=0.3)\n ax.contourf(x, y, z, zdir='z', offset=z_min, cmap=cm.coolwarm)\n ax.contourf(x, y, z, zdir='x', offset=x_min, cmap=cm.coolwarm)\n if j == 0 or j == 1:\n ax.contourf(x, y, z, zdir='y', offset=y_max, cmap=cm.coolwarm)\n elif j == 2 or j == 3:\n ax.contourf(x, y, z, zdir='y', offset=y_min, cmap=cm.coolwarm)\n\n ax.set_xlabel('X')\n ax.set_xlim(x_min, x_max)\n ax.set_ylabel('Y')\n ax.set_ylim(y_min, y_max)\n ax.set_zlabel('Z')\n ax.set_zlim(z_min, z_max)\n\n plt.savefig(file_path, dpi=80)\n plt.show()\n plt.close()\n\nplot_3d_contour(p, f)", "sub_path": "002.abnormal_dect/99.test/3d.py", "file_name": "3d.py", "file_ext": "py", "file_size_in_byte": 1842, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "mpl_toolkits.mplot3d.axes3d.get_test_data", "line_number": 12, "usage_type": "call"}, {"api_name": "mpl_toolkits.mplot3d.axes3d", "line_number": 12, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.cm.coolwarm", "line_number": 49, "usage_type": "attribute"}, {"api_name": "matplotlib.cm", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.cm.coolwarm", "line_number": 50, "usage_type": "attribute"}, {"api_name": "matplotlib.cm", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.cm.coolwarm", "line_number": 52, "usage_type": "attribute"}, {"api_name": "matplotlib.cm", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.cm.coolwarm", "line_number": 54, "usage_type": "attribute"}, {"api_name": "matplotlib.cm", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}]} +{"seq_id": "219726182", "text": "import pandas as pd\n\n\n# visualization\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n##reading data set\ntrain_df = pd.read_csv('/content/train.csv')\n\n\n\ntrain_df['Sex'] = train_df['Sex'].map( {'female': 1, 'male': 0} )\nprint(train_df[[\"Sex\", \"Survived\"]].groupby(['Sex'], as_index=False).mean().sort_values(by='Survived', ascending=False))\n\n# print(train_df['Survived'].corr(train_df['Sex']))\n\n##Analyze by visualizing data\n####Correlating numerical features\ntrain_df.corr()\ng = sns.FacetGrid(train_df, col='Survived')\ng.map(plt.hist, 'Sex', 
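The subplot grid above varies only the camera: ax.view_init(azim=..., elev=...) re-renders the same surface from different angles. A minimal sketch isolating that call, with synthetic data in place of axes3d.get_test_data:

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection on older matplotlib

x, y = np.meshgrid(np.linspace(-3, 3, 50), np.linspace(-3, 3, 50))
z = np.sin(np.sqrt(x ** 2 + y ** 2))

fig = plt.figure(figsize=(9, 3))
for idx, (azim, elev) in enumerate([(-60, 30), (0, 30), (60, 80)], start=1):
    ax = fig.add_subplot(1, 3, idx, projection='3d')
    ax.plot_surface(x, y, z, rstride=2, cstride=2, alpha=0.4)
    ax.view_init(azim=azim, elev=elev)  # move the camera, nothing else
    ax.set_title('azim=%d elev=%d' % (azim, elev))
plt.show()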
bins=20)\nplt.show()", "sub_path": "Coreelation.py", "file_name": "Coreelation.py", "file_ext": "py", "file_size_in_byte": 568, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "pandas.read_csv", "line_number": 9, "usage_type": "call"}, {"api_name": "seaborn.FacetGrid", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 22, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "194061422", "text": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nsys.path.append(r'./efficientdet-pytorch')\nimport torch\nfrom effdet.config import get_efficientdet_config\nfrom effdet.efficientdet import EfficientDet\nimport argparse\n\nparser = argparse.ArgumentParser(description='pth to onnx')\n\n\nparser.add_argument('--batch_size', type=int, default=1, metavar='N',\n help='batch size 1/4/8/16/32')\nparser.add_argument('--checkpoint', type=str, default='d0.pth', metavar='N',\n help='pytorch checkpoint path ')\nparser.add_argument('--out', type=str, default='d7.onnx', metavar='N',\n help='export onnx model')\n\n\nargs = parser.parse_args()\nconfig = get_efficientdet_config(model_name='tf_efficientdet_d7')\nmodel = EfficientDet(config=config,pretrained_backbone=False)\nmodel_path = args.checkpoint\nmodel.load_state_dict(torch.load(model_path,map_location=torch.device('cpu')))\nmodel.eval()\nexample = torch.randn(args.batch_size, 3, 1536, 1536)\nexport_onnx_file = args.out\ntorch.onnx.export(model, example, export_onnx_file, do_constant_folding=True, verbose=True, opset_version=11)", "sub_path": "ACL_PyTorch/contrib/cv/detection/EfficientDetD7/EfficientDetD7_pth2onnx.py", "file_name": "EfficientDetD7_pth2onnx.py", "file_ext": "py", "file_size_in_byte": 1671, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "sys.path.append", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 22, "usage_type": "call"}, {"api_name": "effdet.config.get_efficientdet_config", "line_number": 34, "usage_type": "call"}, {"api_name": "effdet.efficientdet.EfficientDet", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.onnx.export", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.onnx", "line_number": 41, "usage_type": "attribute"}]} +{"seq_id": "136707624", "text": "from typing import Any, Dict\n\nimport arrow\nfrom 
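The map-then-groupby pattern above is worth checking on data small enough to verify by hand. A self-contained sketch with inline rows standing in for the Kaggle train.csv:

import pandas as pd

df = pd.DataFrame({
    'Sex': ['male', 'female', 'female', 'male', 'female'],
    'Survived': [0, 1, 1, 0, 1],
})
df['Sex'] = df['Sex'].map({'female': 1, 'male': 0})

# Survival rate per encoded group, highest first.
print(df[['Sex', 'Survived']].groupby(['Sex'], as_index=False).mean()
      .sort_values(by='Survived', ascending=False))

# Correlation between the encoded column and the target.
print(df['Survived'].corr(df['Sex']))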
rsserpent.utils import HTTPClient, cached\n\n\npath = \"/bilibili/user/{uid}/video\"\n\n\n@cached\nasync def provider(uid: int) -> Dict[str, Any]:\n    \"\"\"Subscribe to a user's latest uploaded videos.\"\"\"\n    user_info_api = f\"https://api.bilibili.com/x/space/acc/info?mid={uid}&jsonp=jsonp\"\n    video_list_api = (\n        f\"https://api.bilibili.com/x/space/arc/search?mid={uid}&ps=30\"\n        \"&tid=0&pn=1&keyword=&order=pubdate&jsonp=jsonp\"\n    )\n\n    async with HTTPClient() as client:\n        user_info = (await client.get(user_info_api)).json()\n        video_list = (await client.get(video_list_api)).json()\n\n    username = user_info[\"data\"][\"name\"]\n\n    return {\n        \"title\": f\"Latest videos uploaded by {username}\",\n        \"link\": f\"https://space.bilibili.com/{uid}/video\",\n        \"description\": user_info[\"data\"][\"sign\"],\n        \"items\": [\n            {\n                \"title\": item[\"title\"],\n                \"description\": item[\"description\"],\n                \"link\": f\"https://www.bilibili.com/video/{item['bvid']}\",\n                \"pubDate\": arrow.get(item[\"created\"]),\n                \"author\": username,\n            }\n            for item in video_list[\"data\"][\"list\"][\"vlist\"]\n        ],\n    }\n", "sub_path": "rsserpent_plugin_bilibili/user/video.py", "file_name": "video.py", "file_ext": "py", "file_size_in_byte": 1236, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "rsserpent.utils.HTTPClient", "line_number": 19, "usage_type": "call"}, {"api_name": "arrow.get", "line_number": 34, "usage_type": "call"}, {"api_name": "rsserpent.utils.cached", "line_number": 10, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 11, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "109415324", "text": "\"\"\"Allocation API tests.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport unittest\n\nimport mock\n\nfrom treadmill import admin\nfrom treadmill.api import allocation\n\n\nclass ApiAllocationTest(unittest.TestCase):\n    \"\"\"treadmill.api.allocation tests.\"\"\"\n\n    @mock.patch('treadmill.context.AdminContext.conn',\n                mock.Mock(return_value=None))\n    def setUp(self):\n        self.alloc = allocation.API()\n\n    def tearDown(self):\n        pass\n\n    @mock.patch('treadmill.context.AdminContext.conn',\n                mock.Mock(return_value=admin.Admin(None, None)))\n    @mock.patch('treadmill.admin.Allocation.list',\n                mock.Mock(return_value=[]))\n    @mock.patch('treadmill.admin.CellAllocation.list',\n                mock.Mock(return_value=[]))\n    def test_list(self):\n        \"\"\"Dummy test for treadmill.api.allocation._list()\"\"\"\n        alloc_admin = admin.Allocation(None)\n        self.alloc.list()\n        alloc_admin.list.assert_called_with({})\n\n    @mock.patch('treadmill.context.AdminContext.conn',\n                mock.Mock(return_value=admin.Admin(None, None)))\n    @mock.patch('treadmill.admin.Allocation.create',\n                mock.Mock(return_value={}))\n    @mock.patch('treadmill.admin.Allocation.get',\n                mock.Mock(return_value={'environment': 'prod'}))\n    @mock.patch('treadmill.admin.CellAllocation.create',\n                mock.Mock(return_value={}))\n    @mock.patch('treadmill.admin.CellAllocation.get',\n                mock.Mock(return_value={}))\n    @mock.patch('treadmill.api.allocation._check_capacity',\n                mock.Mock(return_value=True))\n    def test_reservation(self):\n        \"\"\"Dummy test for treadmill.api.allocation._list()\"\"\"\n        alloc_admin = admin.CellAllocation(None)\n        self.alloc.reservation.create(\n            'tenant/alloc/cellname',\n            {'memory': '1G',\n             'cpu': '100%',\n             'disk': '2G',\n             'partition': None})\n        
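The provider above issues two awaited GETs through one client session and reshapes the JSON into a feed dict. The same request shape with plain httpx -- a sketch that assumes the httpx package and uses placeholder URLs, since rsserpent's HTTPClient wrapper is what the real code relies on:

import asyncio
import httpx

async def fetch_user_and_videos(uid: int):
    # One session reused for both requests, as in the provider above.
    async with httpx.AsyncClient() as client:
        user_info = (await client.get(f'https://example.invalid/info?mid={uid}')).json()
        video_list = (await client.get(f'https://example.invalid/videos?mid={uid}')).json()
    return user_info, video_list

# asyncio.run(fetch_user_and_videos(42))  # needs network access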
alloc_admin.create.assert_called_with(\n ['cellname', 'tenant/alloc'],\n {'disk': '2G',\n 'partition': None,\n 'cpu': '100%',\n 'rank': 100,\n 'memory': '1G'},\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n", "sub_path": "tests/api/allocation_test.py", "file_name": "allocation_test.py", "file_ext": "py", "file_size_in_byte": 2348, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "unittest.TestCase", "line_number": 17, "usage_type": "attribute"}, {"api_name": "treadmill.api.allocation.API", "line_number": 23, "usage_type": "call"}, {"api_name": "treadmill.api.allocation", "line_number": 23, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 20, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 21, "usage_type": "call"}, {"api_name": "treadmill.admin.Allocation", "line_number": 36, "usage_type": "call"}, {"api_name": "treadmill.admin", "line_number": 36, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 28, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 29, "usage_type": "call"}, {"api_name": "treadmill.admin.Admin", "line_number": 29, "usage_type": "call"}, {"api_name": "treadmill.admin", "line_number": 29, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 30, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 31, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 32, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 33, "usage_type": "call"}, {"api_name": "treadmill.admin.CellAllocation", "line_number": 54, "usage_type": "call"}, {"api_name": "treadmill.admin", "line_number": 54, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 40, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 41, "usage_type": "call"}, {"api_name": "treadmill.admin.Admin", "line_number": 41, "usage_type": "call"}, {"api_name": "treadmill.admin", "line_number": 41, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 42, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 43, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 44, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 45, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 46, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 47, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 48, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 49, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 50, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 51, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "135165109", "text": "import string\nimport random\nfrom itertools import combinations\nfrom collections import deque\n\n\n\nclass User:\n def __init__(self, name):\n self.name = name\n\n\nclass SocialGraph:\n def __init__(self):\n self.lastID = 0\n self.users = {}\n self.friendships = {}\n\n\n\n def addFriendship(self, userID, friendID):\n \"\"\"\n Creates a bi-directional friendship\n \"\"\"\n if userID == friendID:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friendID in self.friendships[userID] or userID in self.friendships[friendID]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[userID].add(friendID)\n self.friendships[friendID].add(userID)\n\n def addUser(self, name):\n \"\"\"\n Create a new user with 
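The decorator stack above swaps real LDAP-backed classes for mock.Mock objects for the duration of each test; the patched attribute then records its calls so the test can assert on them, exactly as assert_called_with does here. The mechanism in isolation, using only the standard library:

import os.path
import unittest
from unittest import mock

class PatchDemoTest(unittest.TestCase):

    @mock.patch('os.path.getsize', mock.Mock(return_value=1024))
    def test_patched_call(self):
        # The patched function returns the canned value...
        self.assertEqual(os.path.getsize('anything'), 1024)
        # ...and remembers how it was called.
        os.path.getsize.assert_called_with('anything')

if __name__ == '__main__':
    unittest.main()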
a sequential integer ID\n \"\"\"\n self.lastID += 1 # automatically increment the ID to assign the new user\n\n\n\n self.users[self.lastID] = User(name) \n self.friendships[self.lastID] = set()\n\n def bfs(self, starting_vertex, target): \n visited_bfs = []\n queue = deque()\n queue.append([starting_vertex])\n visited = {}\n # avg = 0\n \n while queue: \n path = queue.popleft()\n last_node = path[-1:][0]\n if last_node not in visited_bfs:\n visited[last_node] = path\n # avg += (len(path) - 1)\n # print(last_node, path)\n if last_node == target:\n return path\n visited_bfs.append(last_node)\n for v in self.friendships[last_node]:\n new_list = list(path)\n new_list.append(v)\n queue.append(new_list)\n # print('Avg', avg)\n return visited\n\n def populateGraph(self, numUsers, avgFriendships):\n \"\"\"\n Takes a number of users and an average number of friendships\n as arguments\n\n Creates that number of users and a randomly distributed friendships\n between those users.\n\n The number of users must be greater than the average number of friendships.\n \"\"\"\n # Reset graph\n self.lastID = 0\n self.users = {}\n self.friendships = {}\n # !!!! IMPLEMENT ME\n\n # Add users\n for i in range(numUsers):\n self.addUser(f'User {i}') \n\n # Create friendship pairs \n # len = friends(friends - 1)/2 O(n^2)\n possible_friendships = list(combinations(range(1, numUsers+1), 2))\n\n random.shuffle(possible_friendships) # O(n)\n\n T = int(numUsers/2 * avgFriendships) # total friendship needed O(1)\n\n actual_friendships = possible_friendships[:T] # O(1)\n\n # c=0\n for friendship in actual_friendships: # O(n) \n # c += 1\n self.addFriendship(friendship[0], friendship[1])\n # print(c, 'C')\n\n\n\n \n\n def getAllSocialPaths(self, userID):\n \"\"\"\n Takes a user's userID as an argument\n\n Returns a dictionary containing every user in that user's\n extended network with the shortest friendship path between them.\n\n The key is the friend's ID and the value is the path.\n \"\"\"\n # visited = {} # Note that this is a dictionary, not a set\n # !!!! IMPLEMENT ME\n\n # visited = self.bfs(userID, self.users[1])\n\n return self.bfs(userID, self.users[1])\n\n\n # !! When there are 100 users with 10 friends each, addFriendship must be called 500 times. \n # !! (100 users / 2) * (10 friends / 1 user) => 500 friendships formed\n # !! The 2 is needed since the friendships are bi-directional\n\n # !! When there are 1000 users with an average of 5 friends:\n # !! 100% of users are in a user's extended social network\n # !! (with such a large number there were no empty sets.)\n # !! On average they are 3.2 degrees of separation from each other. Most are 4 degrees apart with some a little closer.\n\n # !! Stretch: Instead of creating one average for the numbers of friends provide different averages for different population groups. \n # !! Higher average number of contacts for people who are: outgoing, travel frequently, live in larger cities, involved in many social clubs,etc. 
And a lower average for the inverse.\n\n\n\n\nif __name__ == '__main__':\n sg = SocialGraph()\n sg.populateGraph(10, 2) # 30/10\n print(sg.friendships)\n connections = sg.getAllSocialPaths(1)\n print(connections)\n # print('*' * 10)\n # sg2 = SocialGraph()\n # sg2.populateGraph(100, 10) # >200/100\n # # print(sg2.friendships)\n # connections2 = sg2.getAllSocialPaths(1)\n # print(connections2)\n # print('*' * 10)\n # sg3 = SocialGraph()\n # sg3.populateGraph(1000, 10)\n # # print(sg2.friendships)\n # connections3 = sg3.getAllSocialPaths(1) # >3000/1000\n # print(connections3)", "sub_path": "projects/graph/social/social.py", "file_name": "social.py", "file_ext": "py", "file_size_in_byte": 5031, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "collections.deque", "line_number": 46, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 90, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 92, "usage_type": "call"}]} +{"seq_id": "9260589", "text": "#!/usr/bin/env python3\nimport argparse\nimport logging\nimport gc\nimport objgraph\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.python.client import device_lib\n\n\nimport numpy as np\n\nfrom trainer.ellington_library import EllingtonLibrary, Track\nfrom trainer.audio import Audio\nfrom trainer.generator import LibraryIterator, TrackIterator\nfrom trainer.model import model_gen\n\nclass CustomCallback(keras.callbacks.Callback):\n def __init__(self, jobd):\n self.step = 0\n self.jobd = jobd\n self.metric_cache = {}\n\n def on_batch_end(self, batch, logs={}):\n self.step = self.step + 1\n for k in self.params['metrics']:\n if k in logs:\n self.metric_cache[k] = self.metric_cache.get(k, 0) + logs[k]\n\n metrics_log = ''\n for (k, v) in self.metric_cache.items():\n if abs(v) > 1e-3:\n metrics_log += ' - %s: %.4f' % (k, v)\n else:\n metrics_log += ' - %s: %.4e' % (k, v)\n\n print('step: {}/{} ::{}'.format(self.step,\n self.params['steps'],\n metrics_log))\n self.metric_cache.clear()\n\n gc.collect() \n\n def on_epoch_end(self, epoch, logs={}): \n print(\"Saving model\")\n\n # Save the model locally\n self.model.save('model.h5')\n\n # Save the model to the Cloud Storage bucket's jobs directory\n print(\"Saving to : \" + self.jobd)\n with file_io.FileIO('model.h5', mode='rb') as input_f:\n with file_io.FileIO(self.jobd + '-model.h5', mode='w+') as output_f:\n output_f.write(input_f.read())\n\n\ndef main(data_dir=\"data/smnp/\", ellington_lib=\"data/example.el\", job_dir=\"logs\"):\n # Start logging\n logging.basicConfig(\n format='%(asctime)s %(levelname)s %(module)s %(lineno)d : %(message)s', level=logging.DEBUG)\n\n # List the available tensorflow devices\n print(device_lib.list_local_devices()) \n\n # Set up the data input etc.\n train_lib = EllingtonLibrary.from_file(ellington_lib)\n valid_lib = EllingtonLibrary.from_file(ellington_lib)\n\n # Set up the generators to yield training data\n training_gen = LibraryIterator(\n train_lib, data_dir, samples=128, batchsize=512, start=30, end=150, iterations=1)\n validation_gen = LibraryIterator(\n valid_lib, data_dir, samples=4, batchsize=64, start=30, end=200, iterations=1)\n\n # Fix an input size for our model\n input_time_dim = 1720\n input_freq_dim = 256\n\n # Create the model, print info\n model = model_gen(input_time_dim, input_freq_dim)\n print(model.summary())\n\n # Compile the model\n sgd = 
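The bfs above doubles as a shortest-path search and, when the target is never found, a map of shortest paths to everyone reachable. Its core loop, reduced to the essentials over a plain adjacency dict:

from collections import deque

def shortest_paths(friendships, start):
    # Breadth-first search: shortest friend-chain from start to each reachable user.
    paths = {start: [start]}
    queue = deque([start])
    while queue:
        node = queue.popleft()
        for neighbor in friendships[node]:
            if neighbor not in paths:  # first visit is the shortest path
                paths[neighbor] = paths[node] + [neighbor]
                queue.append(neighbor)
    return paths

friendships = {1: {2, 3}, 2: {1, 4}, 3: {1}, 4: {2}}
print(shortest_paths(friendships, 1))
# e.g. {1: [1], 2: [1, 2], 3: [1, 3], 4: [1, 2, 4]}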
keras.optimizers.SGD(\n lr=1e-4, decay=1e-6, momentum=0.9, nesterov=True)\n model.compile(optimizer=sgd,\n loss='mse',\n metrics=['mae', 'msle', 'mape'])\n\n # Set up callbacks - one for tensorboard\n tfcallback = keras.callbacks.TensorBoard(log_dir=job_dir + \"/tensorboard/\",\n histogram_freq=0,\n write_grads=True,\n write_graph=False,\n write_images=False,\n batch_size=32)\n # And another for our custom callback that saves the model.\n bcallback = CustomCallback(job_dir)\n\n # One for a progress bar\n prog_bar = keras.callbacks.ProgbarLogger(count_mode='steps')\n\n # Fit the model using all of the above!\n model.fit_generator(\n generator = training_gen.batch(), \n steps_per_epoch = training_gen.len(), \n epochs = 1000, \n verbose = 2, \n callbacks = [tfcallback, bcallback, prog_bar], \n validation_data = validation_gen.batch(), \n validation_steps = validation_gen.len(),\n use_multiprocessing=True \n )\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--data-dir', required=True, help='Path to training data, in the form of compressed numpy arrays')\n parser.add_argument('--ellington-lib', required=True, help='The ellington library from which to read track names and BPMs')\n parser.add_argument('--job-dir', required=True, help='The directory to export the model, and store temp files')\n args = parser.parse_args()\n arguments = args.__dict__\n main(**arguments)\n", "sub_path": "bellson/bellson.py", "file_name": "bellson.py", "file_ext": "py", "file_size_in_byte": 4500, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "tensorflow.keras.callbacks", "line_number": 20, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 20, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.python.lib.io.file_io.FileIO", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.python.lib.io.file_io", "line_number": 54, "usage_type": "name"}, {"api_name": "tensorflow.python.lib.io.file_io.FileIO", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.python.lib.io.file_io", "line_number": 55, "usage_type": "name"}, {"api_name": "logging.basicConfig", "line_number": 61, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 62, "usage_type": "attribute"}, {"api_name": "tensorflow.python.client.device_lib.list_local_devices", "line_number": 65, "usage_type": "call"}, {"api_name": "tensorflow.python.client.device_lib", "line_number": 65, "usage_type": "name"}, {"api_name": "trainer.ellington_library.EllingtonLibrary.from_file", "line_number": 68, "usage_type": "call"}, {"api_name": "trainer.ellington_library.EllingtonLibrary", "line_number": 68, "usage_type": "name"}, {"api_name": "trainer.ellington_library.EllingtonLibrary.from_file", "line_number": 69, "usage_type": "call"}, {"api_name": "trainer.ellington_library.EllingtonLibrary", "line_number": 69, "usage_type": "name"}, {"api_name": "trainer.generator.LibraryIterator", "line_number": 72, "usage_type": "call"}, {"api_name": "trainer.generator.LibraryIterator", "line_number": 74, "usage_type": "call"}, {"api_name": "trainer.model.model_gen", "line_number": 82, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.SGD", "line_number": 86, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers", "line_number": 86, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 86, 
"usage_type": "name"}, {"api_name": "tensorflow.keras.callbacks.TensorBoard", "line_number": 93, "usage_type": "call"}, {"api_name": "tensorflow.keras.callbacks", "line_number": 93, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 93, "usage_type": "name"}, {"api_name": "tensorflow.keras.callbacks.ProgbarLogger", "line_number": 103, "usage_type": "call"}, {"api_name": "tensorflow.keras.callbacks", "line_number": 103, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 103, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 119, "usage_type": "call"}]} +{"seq_id": "344231579", "text": "from django.db import models\nimport django.contrib.auth.models as authModels\n\n\nclass Article (models.Model):\n #Publication date of an article\n datePub = models.DateTimeField('Publication date')\n #Title of an article\n title = models.CharField(max_length=200)\n #Type of article: either SenProc or Wiki\n typeArticleList = (\n ('ScenProc', 'Scenario/Procédure'),\n ('Wiki', 'Wiki')\n )\n TArticle = models.CharField(\n max_length=10,\n choices=typeArticleList,\n default='Wiki'\n )\n permOnArticle = models.ForeignKey(\n 'RolesArticle',\n null=True\n )\n #Tag linked to Article, not yet implemented\n tagArt = models.ManyToManyField(\n 'Tag',\n blank=True,\n null=True\n )\n\n\nclass Content (models.Model):\n #Modification date\n dateModif = models.DateTimeField('Modification date')\n #Content, in HTML\n contentHTML = models.TextField()\n #Content, in Markdown\n contentMD = models.TextField()\n #Object that refers to the previous version of the content, set to NULL if\n #we create an article\n contentPrev = models.ForeignKey(\n 'Content',\n blank=True,\n null=True\n )\n #Language available for an article (Def: Fr, Available: Fr, En)\n #First, we define our language list with a Field.choices\n languageList = (\n ('FR', 'Français'),\n ('EN', 'English')\n )\n #Then we define the method filed.\n language = models.CharField(\n max_length=2,\n choices=languageList,\n default='FR'\n )\n #Object that referes to the article\n ArticleRef = models.ForeignKey(\n 'Article',\n null=True\n )\n class Tag (models.Model):\n tagName = models.CharField(max_length=50)\n\n\nclass WikiArt (Article):\n Article.TArticle='Wiki'\n\n\nclass ScenProcArt(Article):\n Article.TArticle='ScenProc'\n #Difficulty for a procedure (because of skills required, ...)\n difficulty= models.IntegerField()\n #Duration, in second, for a procedure to be executed\n duration=models.IntegerField()\n\n\nclass RolesArticle(models.Model):\n #Read permition on an article\n canRead = models.BooleanField(default=True)\n #Write permition on an article\n canWrite = models.BooleanField(default=False)\n #Role associated to this permissions\n roleRef = models.ForeignKey(\n 'Role',\n null = True\n )\n\n\n# This class defined the role in the C4 structure\n# e.g, its defined the operator's level\nclass Role(models.Model):\n #Name of the specific role: N1, N2,...\n roleName = models.CharField(max_length=15)\n # Reference to the system user\n userRoleRef = models.ForeignKey(\n 'UserRoles',\n null = True)\n #References to the system group\n GroupRoleRef = models.ForeignKey(\n 'GroupRoles',\n null = True\n )\n\n\n#Defined the user in the system, not in the C4\n#e.g, its defined the user\nclass UserRoles (models.Model):\n #The user's adminitrative level: students, teachers, ...\n userRoleName = models.CharField(max_length=15)\n #Ref to the django user class\n userRef = models.ManyToManyField(\n 
authModels.User,\n null=True\n )\n\n\n#Defined the group int the system.\n#e.g, its defined the user's administrative group\nclass GroupRoles (models.Model):\n #The group's adminitrative level:\n roleGroup = models.CharField(max_length=10)\n #Ref to the django group class\n grpRef = models.ManyToManyField(\n authModels.Group,\n null=True\n )\n", "sub_path": "c4wiki/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 3723, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "django.db.models.Model", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 5, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 7, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 9, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 9, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 32, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 32, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 34, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 36, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 36, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 38, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 38, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 53, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 53, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 59, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 59, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 63, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 63, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 64, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 64, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 74, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 74, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 76, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 76, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 79, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 79, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 81, "usage_type": "call"}, 
{"api_name": "django.db.models", "line_number": 81, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 83, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 83, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 85, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 85, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 93, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 93, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 95, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 95, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 97, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 97, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 101, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 101, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 109, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 109, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 111, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 111, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 113, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 113, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User", "line_number": 114, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models", "line_number": 114, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 121, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 121, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 123, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 123, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 125, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 125, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.Group", "line_number": 126, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models", "line_number": 126, "usage_type": "name"}]} +{"seq_id": "643532290", "text": "from django.test import TestCase\n#models\nfrom catalog.models import Author\n#forms\nimport datetime\nfrom django.utils import timezone\nfrom catalog.forms import RenewBookForm, RenewBookModelForm\n#views\nfrom django.core.urlresolvers import reverse\n#loan\nfrom catalog.models import BookInstance, Book, Genre\nfrom django.contrib.auth.models import User #Required to assign User as a borrower\n\n\n# Create your tests here.\n#views loan test\nclass LoanedBookInstancesByUserListViewTest(TestCase):\n\n def setUp(self):\n #Create two users\n test_user1 = User.objects.create_user(username='testuser1', password='12345') \n test_user1.save()\n test_user2 = User.objects.create_user(username='testuser2', password='12345') \n test_user2.save()\n \n #Create a book\n test_author = Author.objects.create(first_name='John', last_name='Smith')\n test_genre = Genre.objects.create(name='Fantasy')\n #test_language = Language.objects.create(name='English')\n test_book = Book.objects.create(title='Book Title', summary = 'My book summary', isbn='ABCDEFG', author=test_author,) #language=test_language)\n # Create genre as a 
post-step\n genre_objects_for_book = Genre.objects.all()\n test_book.genre=genre_objects_for_book\n test_book.save()\n\n #Create 30 BookInstance objects\n number_of_book_copies = 30\n for book_copy in range(number_of_book_copies):\n return_date= timezone.now() + datetime.timedelta(days=book_copy%5)\n if book_copy % 2:\n the_borrower=test_user1\n else:\n the_borrower=test_user2\n status='m'\n BookInstance.objects.create(book=test_book,imprint='Unlikely Imprint, 2016', due_back=return_date, borrower=the_borrower, status=status)\n \n def test_redirect_if_not_logged_in(self):\n resp = self.client.get(reverse('my-borrowed'))\n self.assertRedirects(resp, '/accounts/login/?next=/mybooks/')\n\n def test_logged_in_uses_correct_template(self):\n login = self.client.login(username='testuser1', password='12345')\n resp = self.client.get(reverse('my-borrowed'))\n \n #Check our user is logged in\n self.assertEqual(str(resp.context['user']), 'testuser1')\n #Check that we got a response \"success\"\n self.assertEqual(resp.status_code, 200)\n\n #Check we used correct template\n self.assertTemplateUsed(resp, 'bookinstance_list_borrowed_user.html')\n\n#views test\nclass AuthorListViewTest(TestCase):\n\n @classmethod\n def setUpTestData(cls):\n #Create 13 authors for pagination tests\n number_of_authors = 13\n for author_num in range(number_of_authors):\n Author.objects.create(first_name='Christian %s' % author_num, last_name = 'Surname %s' % author_num,)\n \n def test_view_url_exists_at_desired_location(self): \n resp = self.client.get('/authors/') \n self.assertEqual(resp.status_code, 200) \n \n def test_view_url_accessible_by_name(self):\n resp = self.client.get(reverse('authors'))\n self.assertEqual(resp.status_code, 200)\n \n def test_view_uses_correct_template(self):\n resp = self.client.get(reverse('authors'))\n self.assertEqual(resp.status_code, 200)\n\n self.assertTemplateUsed(resp, 'author_list.html')\n \n def test_pagination_is_ten(self):\n resp = self.client.get(reverse('authors'))\n self.assertEqual(resp.status_code, 200)\n self.assertTrue('is_paginated' in resp.context)\n self.assertTrue(resp.context['is_paginated'] == True)\n self.assertTrue( len(resp.context['author_list']) == 10)\n\n def test_lists_all_authors(self):\n #Get second page and confirm it has (exactly) remaining 3 items\n resp = self.client.get(reverse('authors')+'?page=2')\n self.assertEqual(resp.status_code, 200)\n self.assertTrue('is_paginated' in resp.context)\n self.assertTrue(resp.context['is_paginated'] == True)\n self.assertTrue( len(resp.context['author_list']) == 3)\n\n#forms test\nclass RenewBookFormTest(TestCase):\n\n def test_renew_form_date_field_label(self):\n form = RenewBookForm() \n self.assertTrue(form.fields['renewal_date'].label == None or form.fields['renewal_date'].label == 'renewal date')\n\n def test_renew_form_date_field_help_text(self):\n form = RenewBookForm()\n self.assertEqual(form.fields['renewal_date'].help_text,'Enter a date between now and 4 weeks (default 3).')\n\n def test_renew_form_date_in_past(self):\n date = datetime.date.today() - datetime.timedelta(days=1)\n form_data = {'renewal_date': date}\n form = RenewBookForm(data=form_data)\n self.assertFalse(form.is_valid())\n\n def test_renew_form_date_too_far_in_future(self):\n date = datetime.date.today() + datetime.timedelta(weeks=4) + datetime.timedelta(days=1)\n form_data = {'renewal_date': date}\n form = RenewBookForm(data=form_data)\n self.assertFalse(form.is_valid())\n\n def test_renew_form_date_today(self):\n date = 
datetime.date.today()\n form_data = {'renewal_date': date}\n form = RenewBookForm(data=form_data)\n self.assertTrue(form.is_valid())\n \n def test_renew_form_date_max(self):\n date = timezone.now() + datetime.timedelta(weeks=4)\n form_data = {'renewal_date': date}\n form = RenewBookForm(data=form_data)\n self.assertTrue(form.is_valid())\n\n#models test\nclass AuthorModelTest(TestCase):\n\n @classmethod\n def setUpTestData(cls):\n #Set up non-modified objects used by all test methods\n Author.objects.create(first_name='Big', last_name='Bob')\n\n def test_first_name_label(self):\n author=Author.objects.get(id=1)\n field_label = author._meta.get_field('first_name').verbose_name\n self.assertEquals(field_label,'first name')\n\n def test_date_of_death_label(self):\n author=Author.objects.get(id=1)\n field_label = author._meta.get_field('date_of_death').verbose_name\n self.assertEquals(field_label,'Died')\n\n def test_first_name_max_length(self):\n author=Author.objects.get(id=1)\n max_length = author._meta.get_field('first_name').max_length\n self.assertEquals(max_length,100)\n\n def test_object_name_is_last_name_comma_first_name(self):\n author=Author.objects.get(id=1)\n expected_object_name = '%s, %s' % (author.last_name, author.first_name)\n self.assertEquals(expected_object_name,str(author))\n\n def test_get_absolute_url(self):\n author=Author.objects.get(id=1)\n #This will also fail if the urlconf is not defined.\n self.assertEquals(author.get_absolute_url(),'/author/1')\n\n''' class YourTestClass(TestCase):\n\n @classmethod\n def setUpTestData(cls):\n print(\"setUpTestData: Run once to set up non-modified data for all class methods.\")\n pass\n\n def setUp(self):\n print(\"setUp: Run once for every test method to setup clean data.\")\n pass\n\n def test_false_is_false(self):\n print(\"Method: test_false_is_false.\")\n self.assertFalse(False)\n\n def test_false_is_true(self):\n print(\"Method: test_false_is_true.\")\n self.assertTrue(False)\n\n def test_one_plus_one_equals_two(self):\n print(\"Method: test_one_plus_one_equals_two.\")\n self.assertEqual(1 + 1, 2) '''\n", "sub_path": "catalog/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 7470, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "django.test.TestCase", "line_number": 17, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 21, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 21, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 23, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 23, "usage_type": "name"}, {"api_name": "catalog.models.Author.objects.create", "line_number": 27, "usage_type": "call"}, {"api_name": "catalog.models.Author.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "catalog.models.Author", "line_number": 27, "usage_type": "name"}, {"api_name": "catalog.models.Genre.objects.create", "line_number": 28, "usage_type": "call"}, {"api_name": "catalog.models.Genre.objects", "line_number": 28, "usage_type": "attribute"}, {"api_name": "catalog.models.Genre", "line_number": 28, "usage_type": "name"}, {"api_name": 
"catalog.models.Book.objects.create", "line_number": 30, "usage_type": "call"}, {"api_name": "catalog.models.Book.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "catalog.models.Book", "line_number": 30, "usage_type": "name"}, {"api_name": "catalog.models.Genre.objects.all", "line_number": 32, "usage_type": "call"}, {"api_name": "catalog.models.Genre.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "catalog.models.Genre", "line_number": 32, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 39, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 39, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 39, "usage_type": "call"}, {"api_name": "catalog.models.BookInstance.objects.create", "line_number": 45, "usage_type": "call"}, {"api_name": "catalog.models.BookInstance.objects", "line_number": 45, "usage_type": "attribute"}, {"api_name": "catalog.models.BookInstance", "line_number": 45, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 48, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 53, "usage_type": "call"}, {"api_name": "django.test.TestCase", "line_number": 64, "usage_type": "name"}, {"api_name": "catalog.models.Author.objects.create", "line_number": 71, "usage_type": "call"}, {"api_name": "catalog.models.Author.objects", "line_number": 71, "usage_type": "attribute"}, {"api_name": "catalog.models.Author", "line_number": 71, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 78, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 82, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 88, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 96, "usage_type": "call"}, {"api_name": "django.test.TestCase", "line_number": 103, "usage_type": "name"}, {"api_name": "catalog.forms.RenewBookForm", "line_number": 106, "usage_type": "call"}, {"api_name": "catalog.forms.RenewBookForm", "line_number": 110, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 114, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 114, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 114, "usage_type": "call"}, {"api_name": "catalog.forms.RenewBookForm", "line_number": 116, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 120, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 120, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 120, "usage_type": "call"}, {"api_name": "catalog.forms.RenewBookForm", "line_number": 122, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 126, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 126, "usage_type": "attribute"}, {"api_name": "catalog.forms.RenewBookForm", "line_number": 128, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 132, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 132, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 132, "usage_type": "call"}, {"api_name": "catalog.forms.RenewBookForm", "line_number": 134, "usage_type": "call"}, {"api_name": "django.test.TestCase", "line_number": 138, "usage_type": "name"}, {"api_name": "catalog.models.Author.objects.create", "line_number": 
143, "usage_type": "call"}, {"api_name": "catalog.models.Author.objects", "line_number": 143, "usage_type": "attribute"}, {"api_name": "catalog.models.Author", "line_number": 143, "usage_type": "name"}, {"api_name": "catalog.models.Author.objects.get", "line_number": 146, "usage_type": "call"}, {"api_name": "catalog.models.Author.objects", "line_number": 146, "usage_type": "attribute"}, {"api_name": "catalog.models.Author", "line_number": 146, "usage_type": "name"}, {"api_name": "catalog.models.Author.objects.get", "line_number": 151, "usage_type": "call"}, {"api_name": "catalog.models.Author.objects", "line_number": 151, "usage_type": "attribute"}, {"api_name": "catalog.models.Author", "line_number": 151, "usage_type": "name"}, {"api_name": "catalog.models.Author.objects.get", "line_number": 156, "usage_type": "call"}, {"api_name": "catalog.models.Author.objects", "line_number": 156, "usage_type": "attribute"}, {"api_name": "catalog.models.Author", "line_number": 156, "usage_type": "name"}, {"api_name": "catalog.models.Author.objects.get", "line_number": 161, "usage_type": "call"}, {"api_name": "catalog.models.Author.objects", "line_number": 161, "usage_type": "attribute"}, {"api_name": "catalog.models.Author", "line_number": 161, "usage_type": "name"}, {"api_name": "catalog.models.Author.objects.get", "line_number": 166, "usage_type": "call"}, {"api_name": "catalog.models.Author.objects", "line_number": 166, "usage_type": "attribute"}, {"api_name": "catalog.models.Author", "line_number": 166, "usage_type": "name"}]} +{"seq_id": "574757488", "text": "import sys\nimport winreg\nfrom argparse import ArgumentParser\n\n\ndef search(needle):\n found = False\n with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, \"SOFTWARE\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Installer\\\\UserData\", access=winreg.KEY_READ | winreg.KEY_WOW64_64KEY) as userDataParentHandle:\n for userDataIndex in range(0, winreg.QueryInfoKey(userDataParentHandle)[0]):\n user = winreg.EnumKey(userDataParentHandle, userDataIndex)\n with winreg.OpenKey(userDataParentHandle, user) as userDataHandle:\n with winreg.OpenKey(userDataHandle, \"Components\") as componentsParentHandle:\n for componentIndex in range(0, winreg.QueryInfoKey(componentsParentHandle)[0]):\n with winreg.OpenKey(componentsParentHandle, winreg.EnumKey(componentsParentHandle, componentIndex)) as componentHandle:\n for valueIndex in range(0, winreg.QueryInfoKey(componentHandle)[1]):\n valueName, valueData = winreg.EnumValue(componentHandle, valueIndex)[0:2]\n if needle.casefold() in valueData.casefold():\n with winreg.OpenKey(userDataHandle, \"Products\\\\\" + valueName + \"\\\\InstallProperties\") as propertiesHandle:\n if not found:\n found = True\n else:\n print()\n\n print(\"File: \" + valueData)\n print(\"Product: \" + winreg.QueryValueEx(propertiesHandle, \"DisplayName\")[0])\n print(\"Install user: \" + user)\n print(\"Cached installer: \" + winreg.QueryValueEx(propertiesHandle, \"LocalPackage\")[0])\n\n if not found:\n print('No file path containing \"{}\" was found in any installed package.'.format(needle))\n\n\ndef search_command(opts):\n return search(opts.pattern)\n\n\ndef packages_command(opts):\n print('{} - Not yet implemented'.format(opts.command))\n\n\ndef components_command(opts):\n print('{} - Not yet implemented'.format(opts.command))\n\n\ndef create_parser(prog_name):\n parser = ArgumentParser(prog=prog_name)\n sp = parser.add_subparsers(title='commands', dest='command', description='valid commands:')\n search = sp.add_parser('search', 
help='Search for a file within an installed component')\n search.add_argument('pattern', help='Name of the file')\n search.set_defaults(func=search_command)\n packages = sp.add_parser('packages', help='Inventory packages on this system')\n packages.set_defaults(func=packages_command)\n components = sp.add_parser('components', help='Show components of a package')\n components.add_argument('pattern', help='Name of the package')\n components.set_defaults(func=components_command)\n return parser\n\n\ndef main():\n parser = create_parser(sys.argv[0])\n opts = parser.parse_args(sys.argv[1:])\n if not hasattr(opts, 'func'):\n parser.print_help()\n else:\n opts.func(opts)\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "win_inventory.py", "file_name": "win_inventory.py", "file_ext": "py", "file_size_in_byte": 3195, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "winreg.OpenKey", "line_number": 8, "usage_type": "call"}, {"api_name": "winreg.HKEY_LOCAL_MACHINE", "line_number": 8, "usage_type": "attribute"}, {"api_name": "winreg.KEY_READ", "line_number": 8, "usage_type": "attribute"}, {"api_name": "winreg.KEY_WOW64_64KEY", "line_number": 8, "usage_type": "attribute"}, {"api_name": "winreg.QueryInfoKey", "line_number": 9, "usage_type": "call"}, {"api_name": "winreg.EnumKey", "line_number": 10, "usage_type": "call"}, {"api_name": "winreg.OpenKey", "line_number": 11, "usage_type": "call"}, {"api_name": "winreg.OpenKey", "line_number": 12, "usage_type": "call"}, {"api_name": "winreg.QueryInfoKey", "line_number": 13, "usage_type": "call"}, {"api_name": "winreg.OpenKey", "line_number": 14, "usage_type": "call"}, {"api_name": "winreg.EnumKey", "line_number": 14, "usage_type": "call"}, {"api_name": "winreg.QueryInfoKey", "line_number": 15, "usage_type": "call"}, {"api_name": "winreg.EnumValue", "line_number": 16, "usage_type": "call"}, {"api_name": "winreg.OpenKey", "line_number": 18, "usage_type": "call"}, {"api_name": "winreg.QueryValueEx", "line_number": 25, "usage_type": "call"}, {"api_name": "winreg.QueryValueEx", "line_number": 27, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 46, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 60, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 61, "usage_type": "attribute"}]} +{"seq_id": "537697667", "text": "\"\"\"\n.. _ex-source-space-power-phase-locking:\n\n=========================================================\nCompute power and phase lock in label of the source space\n=========================================================\n\nCompute time-frequency maps of power and phase lock in the source space.\nThe inverse method is linear based on dSPM inverse operator.\n\nThe example also shows the difference in the time-frequency maps\nwhen they are computed with and without subtracting the evoked response\nfrom each epoch. 
The former results in induced activity only while the\nlatter also includes evoked (stimulus-locked) activity.\n\"\"\"\n# Authors: Alexandre Gramfort \n#\n# License: BSD-3-Clause\n\n# %%\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport mne\nfrom mne import io\nfrom mne.datasets import sample\nfrom mne.minimum_norm import read_inverse_operator, source_induced_power\n\nprint(__doc__)\n\n# %%\n# Set parameters\ndata_path = sample.data_path()\nmeg_path = data_path / \"MEG\" / \"sample\"\nraw_fname = meg_path / \"sample_audvis_raw.fif\"\nfname_inv = meg_path / \"sample_audvis-meg-oct-6-meg-inv.fif\"\nlabel_name = \"Aud-rh\"\nfname_label = meg_path / \"labels\" / f\"{label_name}.label\"\n\ntmin, tmax, event_id = -0.2, 0.5, 2\n\n# Setup for reading the raw data\nraw = io.read_raw_fif(raw_fname)\nevents = mne.find_events(raw, stim_channel=\"STI 014\")\ninverse_operator = read_inverse_operator(fname_inv)\n\ninclude = []\nraw.info[\"bads\"] += [\"MEG 2443\", \"EEG 053\"] # bads + 2 more\n\n# Picks MEG channels\npicks = mne.pick_types(\n raw.info, meg=True, eeg=False, eog=True, stim=False, include=include, exclude=\"bads\"\n)\nreject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)\n\n# Load epochs\nepochs = mne.Epochs(\n raw,\n events,\n event_id,\n tmin,\n tmax,\n picks=picks,\n baseline=(None, 0),\n reject=reject,\n preload=True,\n)\n\n# Compute a source estimate per frequency band including and excluding the\n# evoked response\nfreqs = np.arange(7, 30, 2) # define frequencies of interest\nlabel = mne.read_label(fname_label)\nn_cycles = freqs / 3.0 # different number of cycle per frequency\n\n# subtract the evoked response in order to exclude evoked activity\nepochs_induced = epochs.copy().subtract_evoked()\n\nplt.close(\"all\")\n\nfor ii, (this_epochs, title) in enumerate(\n zip([epochs, epochs_induced], [\"evoked + induced\", \"induced only\"])\n):\n # compute the source space power and the inter-trial coherence\n power, itc = source_induced_power(\n this_epochs,\n inverse_operator,\n freqs,\n label,\n baseline=(-0.1, 0),\n baseline_mode=\"percent\",\n n_cycles=n_cycles,\n n_jobs=None,\n )\n\n power = np.mean(power, axis=0) # average over sources\n itc = np.mean(itc, axis=0) # average over sources\n times = epochs.times\n\n ##########################################################################\n # View time-frequency plots\n plt.subplots_adjust(0.1, 0.08, 0.96, 0.94, 0.2, 0.43)\n plt.subplot(2, 2, 2 * ii + 1)\n plt.imshow(\n 20 * power,\n extent=[times[0], times[-1], freqs[0], freqs[-1]],\n aspect=\"auto\",\n origin=\"lower\",\n vmin=0.0,\n vmax=30.0,\n cmap=\"RdBu_r\",\n )\n plt.xlabel(\"Time (s)\")\n plt.ylabel(\"Frequency (Hz)\")\n plt.title(\"Power (%s)\" % title)\n plt.colorbar()\n\n plt.subplot(2, 2, 2 * ii + 2)\n plt.imshow(\n itc,\n extent=[times[0], times[-1], freqs[0], freqs[-1]],\n aspect=\"auto\",\n origin=\"lower\",\n vmin=0,\n vmax=0.7,\n cmap=\"RdBu_r\",\n )\n plt.xlabel(\"Time (s)\")\n plt.ylabel(\"Frequency (Hz)\")\n plt.title(\"ITC (%s)\" % title)\n plt.colorbar()\n\nplt.show()\n", "sub_path": "examples/time_frequency/source_label_time_frequency.py", "file_name": "source_label_time_frequency.py", "file_ext": "py", "file_size_in_byte": 3682, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "mne.datasets.sample.data_path", "line_number": 34, "usage_type": "call"}, {"api_name": "mne.datasets.sample", "line_number": 34, "usage_type": "name"}, {"api_name": "mne.io.read_raw_fif", "line_number": 
44, "usage_type": "call"}, {"api_name": "mne.io", "line_number": 44, "usage_type": "name"}, {"api_name": "mne.find_events", "line_number": 45, "usage_type": "call"}, {"api_name": "mne.minimum_norm.read_inverse_operator", "line_number": 46, "usage_type": "call"}, {"api_name": "mne.pick_types", "line_number": 52, "usage_type": "call"}, {"api_name": "mne.Epochs", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 72, "usage_type": "call"}, {"api_name": "mne.read_label", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "mne.minimum_norm.source_induced_power", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots_adjust", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 128, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}]} +{"seq_id": "113305928", "text": "import csv\nimport os\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\n\nfrom busstation.models import Station, Route\n\n\nclass Command(BaseCommand):\n help = 'Load data from files'\n\n def add_arguments(self, parser):\n pass\n\n def handle(self, *args, **options):\n 
BASE_DIR = os.path.dirname(settings.BASE_DIR)\n file_path = os.path.join(BASE_DIR, 'moscow_bus_stations.csv')\n with open(file_path, 'r') as csvfile:\n station_reader = csv.reader(csvfile, delimiter=';')\n next(station_reader)\n\n for line in station_reader:\n latitude, longitude, raw_routes, name = (line[3], line[2], line[7],\n line[1])\n station = Station.objects.create(name=name, latitude=latitude, longitude=longitude)\n routes = raw_routes.split(';')\n for route in routes:\n route = route.strip()\n route_add = Route.objects.filter(name=route).first()\n if not route_add:\n route_add = Route.objects.create(name=route)\n station.routes.add(route_add)\n station.save()\n print('Data from file moscow_bus_stations.csv loaded')", "sub_path": "project/project/busstation/management/commands/import_stations.py", "file_name": "import_stations.py", "file_ext": "py", "file_size_in_byte": 1351, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 10, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "django.conf.settings.BASE_DIR", "line_number": 17, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 17, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 20, "usage_type": "call"}, {"api_name": "busstation.models.Station.objects.create", "line_number": 26, "usage_type": "call"}, {"api_name": "busstation.models.Station.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "busstation.models.Station", "line_number": 26, "usage_type": "name"}, {"api_name": "busstation.models.Route.objects.filter", "line_number": 30, "usage_type": "call"}, {"api_name": "busstation.models.Route.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "busstation.models.Route", "line_number": 30, "usage_type": "name"}, {"api_name": "busstation.models.Route.objects.create", "line_number": 32, "usage_type": "call"}, {"api_name": "busstation.models.Route.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "busstation.models.Route", "line_number": 32, "usage_type": "name"}]} +{"seq_id": "417700534", "text": "#!/usr/bin/env python3\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask import render_template,abort\nfrom datetime import datetime\nfrom sqlalchemy import create_engine\nfrom pymongo import MongoClient\nimport os\nimport json\n\napp=Flask(__name__)\napp.config['TEMPLATES_AUTO_RELOAD']=True\napp.config['SQLALCHEMY_DATABASE_URI']='mysql://root@localhost/shiyanlou'\ndb=SQLAlchemy(app)\nengine=create_engine('mysql://root@localhost/shiyanlou') \nclient=MongoClient('127.0.0.1',27017)\ndbm=client.shiyanlou\n#-----------------------------\nclass File(db.Model):\n id=db.Column(db.Integer, primary_key=True)\n title=db.Column(db.String(80))\n created_time=db.Column(db.DateTime)\n category_id=db.Column(db.Integer, db.ForeignKey('category.id'))\n category=db.relationship('Category',backref=db.backref('posts',lazy='dynamic'))\n content=db.Column(db.Text)\n\n def __init__(self,title,created_time,category,content):\n self.title =title\n self.created_time = created_time\n self.category=category \n self.content = content\n\n 
def __repr__(self):\n return '<File %r>' % self.title\n \n def add_tag(self,tag_name):\n tag_id=len(self.title) \n tag={'tag_id':tag_id,'tag_name':tag_name}\n dbm.taglist.insert_one(tag)\n\n def remove_tag(self,tag_name):\n dbm.taglist.delete_many({'tag_name':tag_name})\n\n @property\n def tags(self):\n tag_id=len(self.title) \n tags=[]\n for tag in dbm.taglist.find({'tag_id':tag_id}):\n tags.append(tag['tag_name'])\n return tags\n\n\nclass Category(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(80), unique=True)\n\n def __init__(self,name):\n self.name = name\n\n def __repr__(self):\n return '<Category %r>' % self.name\n\n#--------------\n\ndef GetTag(title):\n tags=[]\n tag_id=len(title) \n for tag in dbm.taglist.find({'tag_id':tag_id}):\n tags.append(tag['tag_name'])\n return tags\n\n#print(GetTag())\n\ndef GetAll(table):\n comm='select * from '+table\n datas=engine.execute(comm).fetchall()\n idlist=[]\n for data in datas:\n temp={}\n temp['id']=data[0]\n temp['title']=data[1]\n temp['tag']=GetTag(data[1])\n idlist.append(temp)\n return idlist\n\ndef GetInfo(table,id):\n comm='select * from ' +table+' where id= '+id\n datas=engine.execute(comm).fetchall()\n info=[]\n for data in datas:\n temp={}\n temp['id']=data[0]\n temp['title']=data[1]\n temp['create_time']=data[2]\n temp['content']=data[4]\n info.append(temp)\n return info[0]\n\n#----------\n\n@app.route('/')\ndef index():\n pagelist=GetAll('file')\n return render_template('index.html',titles=pagelist)\n\n@app.route('/files/<file_id>')\ndef file(file_id):\n data = GetInfo('file',file_id)\n if len(data)!=0:\n return render_template('file.html',file=data)\n else:\n abort(404)\n\n@app.errorhandler(404)\ndef not_found(error):\n\treturn render_template('404.html'),404\n \n", "sub_path": "news-w2-3/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 3059, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "flask.Flask", "line_number": 11, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 14, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 15, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 104, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 110, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 112, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 116, "usage_type": "call"}]} +{"seq_id": "301804778", "text": "#!/usr/bin/env python\n'''\nCreated on 27/10/2016\n\n@author: sium\n'''\nfrom __future__ import print_function\n\n\n__author__ = 'sium'\n\n__licence__=\"\"\"\nMIT License\n\nCopyright (c) 2017 Sinan Ugur Umu (SUU) sinanugur@gmail.com\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT 
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n\"\"\"\n\n__doc__=\"\"\"Variant caller for HPV project.\n\nUsage:\n hpv-variant-call.py <BAM> [<OUTCSV>] (--chromosome <chromosome> | --auto) [--discordant] [--reference <reference>] [--start=<start>] [--end=<end>] [--transformed] [--cpu=<cpu>]\n hpv-variant-call.py <BAM> <FASTA> <BED> [--chromosome <chromosome> | --auto] [--reference <reference>] [--start=<start>] [--end=<end>]\n hpv-variant-call.py (-h | --help)\n hpv-variant-call.py --version\n\nArguments:\n <BAM> BAM or SAM File name.\n <FASTA> Output FASTA file name for soft clipped sequences.\n <BED> Output tab-separated BED file name for soft clipped sequences.\n <OUTCSV> Write regular CSV output into a file, not STDOUT.\n -c <chromosome>, --chromosome <chromosome> The name of the chromosome.\n -r <reference>, --reference <reference> Reference FASTA file.\n -s <start>, --start <start> Start position [default : 0]\n -e <end>, --end <end> End position\n -j <cpu>, --cpu <cpu> The number of CPUs for parallel processing. [default : 1] \n\nOptions:\n -a --auto Autodetect chromosome name (with highest coverage) to be fetched. \n -t --transformed Mapped HPV genomes are transformed.\n -h --help Show this screen.\n --version Show version.\n\n\n\"\"\"\n\n\n#prevent sigpipe error\nfrom signal import signal, SIGPIPE, SIG_DFL\nsignal(SIGPIPE,SIG_DFL)\n#########\n\n\nimport pysam\nfrom collections import Counter\nfrom docopt import docopt\n\nimport sys\nfrom math import floor\n\nfrom Bio import SeqIO\nfrom Bio.Seq import Seq\nfrom re import search\nfrom re import match\nfrom re import compile\nfrom pathos.multiprocessing import ProcessPool\nfrom functools import reduce\nfrom itertools import repeat\n\n\ndef auto_detect_chromosome_by_coverage(samfile,bam_file):\n hpv_chromosomes = list(filter(lambda x: x.find(\"HPV\") >= 0, samfile.references)) # find HPV chromosomes\n the_list_of_chromosome_counts = list(\n map(lambda chr: [chr, samfile.count(chr)], hpv_chromosomes)) # estimate HPV chromosome coverages\n autodetected_chromosome = reduce(lambda x, y: x if x[1] > y[1] >= 0 else y,\n the_list_of_chromosome_counts) # find the highest coverage\n print(\"The contig with the highest coverage is %s for the BAM file, %s \" % (autodetected_chromosome[0], bam_file),\n file=sys.stderr)\n\n return(autodetected_chromosome[0])\n\n\n\ndef auto_detect_hpv_type_from_file_name(samfile,bam_file):\n\n hpv_name=search('(HPV[0-9]+)',bam_file).group(1)\n hpv_regex = compile(\"\\(\" + hpv_name + \"\\)\")\n\n autodetected_chromosome = list(filter(lambda x: search(hpv_regex,x), samfile.references)) # find HPV chromosome\n \n print(\"The HPV name detected is %s for the BAM file, %s \" % (autodetected_chromosome[0], bam_file),\n file=sys.stderr)\n\n return (autodetected_chromosome[0])\n\n\n\ndef function_position_counter(pileupread,position_counter,quality_counter):\n if not pileupread.is_refskip:\n if not pileupread.is_del:\n base = pileupread.alignment.query_sequence[pileupread.query_position]\n position_counter[base] += 1\n quality_counter[base] += pileupread.alignment.query_qualities[pileupread.query_position]\n else:\n position_counter[\"deletion\"] += 1\n\n else:\n position_counter[\"skip\"] += 1\n\n\n\ndef function_merge_two_dicts(x, y):\n \"\"\"Given two dicts, merge them into a new dict as a shallow copy.\"\"\"\n z = x.copy()\n z.update(y)\n return(z)\n\ndef function_reduce(x,y):\n 
return((x[0]+y[0],x[1]+y[1]))\n\ndef function_parallel_count(position,bam_file,chromosome):\n samfile = pysam.AlignmentFile(bam_file)\n\n\n position_counter = Counter()\n discordant_counter = Counter()\n quality_counter = Counter()\n discordant_quality_counter = Counter()\n\n if arguments['--discordant']:\n for pileupcolumn in samfile.pileup(chromosome, position, position + 1, truncate=True, max_depth=1000000000):\n for pileupread in pileupcolumn.pileups:\n if (pileupread.alignment.reference_name != pileupread.alignment.next_reference_name):\n function_position_counter(pileupread, discordant_counter, discordant_quality_counter)\n else:\n for pileupcolumn in samfile.pileup(chromosome, position, position + 1, truncate=True, max_depth=1000000000):\n for pileupread in pileupcolumn.pileups:\n function_position_counter(pileupread, position_counter, quality_counter)\n\n samfile.close()\n return({position:(position_counter,quality_counter,discordant_counter,discordant_quality_counter)})\n\n\ndef hpv_variant_table_create(bam_file,chromosome,reference_filename,start,end,csv1):\n\n samfile = pysam.AlignmentFile(bam_file)\n\n if arguments['--auto']:\n\n try:\n chromosome = auto_detect_hpv_type_from_file_name(samfile,bam_file)\n except:\n chromosome = auto_detect_chromosome_by_coverage(samfile, bam_file)\n\n if reference_filename is None:\n sequence = None\n\n else:\n\n\n for record in SeqIO.parse(reference_filename,\"fasta\"):\n if record.id == chromosome:\n sequence=str(record.seq)\n break\n\n\n\n\n start= int(0 if start is None else start) #start position of the fetched location\n end= int(samfile.lengths[samfile.references.index(chromosome)]) if end is None else int(end) #calculate the end by using the chromosome name\n length=int(samfile.lengths[samfile.references.index(chromosome)])\n\n second_half=length - floor(length/2) +1\n first_half=floor(length/2 -1)\n\n function_transformed_position = lambda position: int(\n position + 1 + first_half) if position + 1 <= second_half else int(position + 1 - second_half)\n\n\n\n print(\"chr\\tposition\\treference\\tcoverage\\tA\\tG\\tC\\tT\\tdeletion\\tskip\\tqA\\tqG\\tqC\\tqT\",\n file= csv1 if csv1 else sys.stdout)\n\n\n\n samfile.close()\n with ProcessPool(int(arguments['--cpu'])) as pool:\n res = pool.map(function_parallel_count, range(start,end),repeat(bam_file),repeat(chromosome))\n\n\n results=reduce(function_merge_two_dicts,res)\n\n for position in range(start,end):\n\n if not arguments['--transformed']: # is this a shifted genome, no\n pos = position + 1\n else:\n pos = function_transformed_position(position)\n\n if arguments['--discordant']:\n print_variant_csv_files(results[position][2],results[position][3],chromosome,sequence,position,pos,csv1 if csv1 else sys.stdout)\n else:\n print_variant_csv_files(results[position][0],results[position][1],chromosome,sequence,position,pos,csv1 if csv1 else sys.stdout)\n\n\n\ndef print_variant_csv_files(position_counter,quality_counter,chromosome,sequence,position,pos,where_to_print):\n\n\n print(\"{chromosome}\\t{position}\\t{reference}\\t{coverage}\\t{A}\\t{G}\\t{C}\\t{T}\\t{deletion}\\t{skip}\\t{qA:.2f}\\t{qG:.2f}\\t{qC:.2f}\\t{qT:.2f}\".format(\n chromosome=chromosome, position=pos,\n reference='NA' if sequence is None else sequence[position],\n coverage=position_counter[\"A\"] + position_counter[\"G\"] + position_counter[\"C\"] + position_counter[\"T\"],\n A=position_counter[\"A\"],\n G=position_counter[\"G\"],\n C=position_counter[\"C\"],\n T=position_counter[\"T\"],\n 
deletion=position_counter[\"deletion\"],\n skip=position_counter['skip'],\n qA=quality_counter[\"A\"] / (position_counter[\"A\"] +0.000000000001),\n qG=quality_counter[\"G\"] / (position_counter[\"G\"] +0.000000000001),\n qC=quality_counter[\"C\"] / (position_counter[\"C\"] +0.000000000001),\n qT=quality_counter[\"T\"] / (position_counter[\"T\"] +0.000000000001)\n ),file=where_to_print)\n\n\ndef fetch_soft_clipped(bam_file,chromosome,start,end,fasta_file,tsv_file):\n\n samfile = pysam.AlignmentFile(bam_file)\n\n if arguments['--auto']:\n\n try:\n chromosomes = [auto_detect_hpv_type_from_file_name(samfile,bam_file)]\n except:\n chromosomes = [auto_detect_chromosome_by_coverage(samfile, bam_file)]\n\n elif chromosome is None:\n chromosomes = samfile.references\n else:\n chromosomes = [chromosome]\n\n cigarsoft = compile(\"([1-9][0-9]+)S\")\n\n with open(fasta_file,\"w\") as fasta,open(tsv_file,\"w\") as tsv:\n for chromosome in chromosomes:\n start = int(0 if start is None else start) # start position of the fetched location\n end = int(samfile.lengths[samfile.references.index(chromosome)]) if end is None else int(\n end) # calculate the end by using the chromosome name\n\n for read in samfile.fetch(chromosome,start,end):\n if not read.is_unmapped and search(cigarsoft,read.cigarstring):\n #seq_position=0\n #read_aligned_pairs=read.get_aligned_pairs()\n #for i in read.cigartuples:\n #if i[0] == 4 and i[1] >= 10: #detect soft clipped, 4 is for soft clip\n\n\n if match(cigarsoft, read.cigarstring): #if soft clipping at the beginning\n size=int(match(cigarsoft, read.cigarstring).group(1))\n sequence=read.seq[0:size]\n else: #if soft clipping at the end\n size = int(search(cigarsoft, read.cigarstring).group(1))\n sequence = read.seq[-size:]\n\n if read.is_reverse:\n sequence=str(Seq(sequence).reverse_complement()) #take reverse complement if on opposite strand\n\n\n print (\">{read_id}\\n{sequence}\".format(read_id=read.query_name,sequence=sequence),file=fasta)\n feat_start = read.reference_start if match(cigarsoft,read.cigarstring) else read.reference_end\n\n print (\"{ref_id}\\t{feat_start}\\t{feat_end}\\t{name}\\t{score}\\t{strand}\".format(ref_id=read.reference_name,\n feat_start=feat_start,\n feat_end=feat_start+size,\n name=read.query_name,score=1,strand=\".\"),file=tsv)\n\n #break\n #elif i[0] != 3: #3 is for Ns\n #elif i[0] != 3: # 3 is for Ns\n # seq_position=seq_position + i[1]\n\n\n\n else:\n pass\n\n\n\n\ndef main():\n\n if arguments['<FASTA>']:\n fetch_soft_clipped(arguments['<BAM>'],arguments['--chromosome'],arguments['--start'],arguments['--end'],arguments['<FASTA>'],arguments['<BED>'])\n else:\n\n if arguments['<OUTCSV>']:\n with open(arguments[\"<OUTCSV>\"], \"w\") as csv1:\n hpv_variant_table_create(arguments['<BAM>'], arguments['--chromosome'], arguments['--reference'],\n arguments['--start'], arguments['--end'], csv1)\n else:\n hpv_variant_table_create(arguments['<BAM>'], arguments['--chromosome'], arguments['--reference'],\n arguments['--start'], arguments['--end'], csv1=None)\n\n\nif __name__ == '__main__':\n arguments = docopt(__doc__, version='0.95')\n main()\n", "sub_path": "bin/hpv-variant-call.py", "file_name": "hpv-variant-call.py", "file_ext": "py", "file_size_in_byte": 12869, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "signal.signal", "line_number": 68, "usage_type": "call"}, {"api_name": "signal.SIGPIPE", "line_number": 68, "usage_type": "argument"}, {"api_name": "signal.SIG_DFL", "line_number": 68, "usage_type": 
"argument"}, {"api_name": "functools.reduce", "line_number": 93, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 96, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 104, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 105, "usage_type": "call"}, {"api_name": "re.search", "line_number": 107, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 110, "usage_type": "attribute"}, {"api_name": "pysam.AlignmentFile", "line_number": 140, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 143, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 144, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 145, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 146, "usage_type": "call"}, {"api_name": "pysam.AlignmentFile", "line_number": 164, "usage_type": "call"}, {"api_name": "Bio.SeqIO.parse", "line_number": 179, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 179, "usage_type": "name"}, {"api_name": "math.floor", "line_number": 191, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 192, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 200, "usage_type": "attribute"}, {"api_name": "pathos.multiprocessing.ProcessPool", "line_number": 205, "usage_type": "call"}, {"api_name": "itertools.repeat", "line_number": 206, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 209, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 219, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 221, "usage_type": "attribute"}, {"api_name": "pysam.AlignmentFile", "line_number": 247, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 261, "usage_type": "call"}, {"api_name": "re.search", "line_number": 270, "usage_type": "call"}, {"api_name": "re.match", "line_number": 277, "usage_type": "call"}, {"api_name": "re.match", "line_number": 278, "usage_type": "call"}, {"api_name": "re.search", "line_number": 281, "usage_type": "call"}, {"api_name": "Bio.Seq.Seq", "line_number": 285, "usage_type": "call"}, {"api_name": "re.match", "line_number": 289, "usage_type": "call"}, {"api_name": "docopt.docopt", "line_number": 325, "usage_type": "call"}]} +{"seq_id": "551412728", "text": "import cv2\r\nimport numpy as np\r\nimport os\r\nimport sys\r\nimport glob\r\nimport tensorflow as tf\r\nfrom matplotlib import pyplot\r\n\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nEPOCHS = 10\r\nIMG_WIDTH = 30\r\nIMG_HEIGHT = 30\r\nNUM_CATEGORIES = 43\r\nTEST_SIZE = 0.3\r\n\r\n\r\ndef main():\r\n\r\n # Check command-line arguments\r\n if len(sys.argv) not in [2, 3]:\r\n sys.exit(\"Usage: python traffic.py data_directory [model.h5]\")\r\n\r\n # Get image arrays and labels for all image files\r\n images, labels = load_data(sys.argv[1])\r\n\r\n # Split data into training and testing sets\r\n labels = tf.keras.utils.to_categorical(labels)\r\n x_train, x_test, y_train, y_test = train_test_split(\r\n np.array(images), np.array(labels), test_size=TEST_SIZE\r\n )\r\n\r\n # Get a compiled neural network\r\n model = get_model()\r\n\r\n # Fit model on training data\r\n history = model.fit(x_train, y_train, epochs=EPOCHS)\r\n\r\n # Evaluate neural network performance\r\n train_acc = model.evaluate(x_train, y_train, verbose=2)\r\n test_acc = model.evaluate(x_test, y_test, verbose=2) \r\n \r\n # Save model to file\r\n if len(sys.argv) == 3:\r\n filename = sys.argv[2]\r\n 
model.save(filename)\r\n print(f\"Model saved to {filename}.\")\r\n \r\n # plot loss during training\r\n pyplot.subplot(211)\r\n pyplot.title('Loss')\r\n pyplot.plot(history.history['loss'], label='train')\r\n pyplot.legend()\r\n # plot accuracy during training\r\n pyplot.subplot(212)\r\n pyplot.title('Accuracy')\r\n pyplot.plot(history.history['accuracy'], label='train')\r\n pyplot.tight_layout()\r\n pyplot.legend()\r\n pyplot.show()\r\n\r\n\r\ndef load_data(data_dir):\r\n \"\"\"\r\n Load image data from directory `data_dir`.\r\n\r\n Assume `data_dir` has one directory named after each category, numbered\r\n 0 through NUM_CATEGORIES - 1. Inside each category directory will be some\r\n number of image files.\r\n\r\n Return tuple `(images, labels)`. `images` should be a list of all\r\n of the images in the data directory, where each image is formatted as a\r\n numpy ndarray with dimensions IMG_WIDTH x IMG_HEIGHT x 3. `labels` should\r\n be a list of integer labels, representing the categories for each of the\r\n corresponding `images`.\r\n \"\"\"\r\n #Initiate lists that will be returned as tuple\r\n images=[]\r\n labels=[]\r\n \r\n #Go in directory and find all images within the directory\r\n #loop over all directories:\r\n for i in range(NUM_CATEGORIES):\r\n #add the directory to data-dir and get all the files:\r\n wildcard = os.path.join(data_dir, str(i), \"*\")\r\n files = glob.glob(wildcard) \r\n #loop over each file \r\n for j in files:\r\n #use cv2 to read image as np.ndarray with RGB colours (default)\r\n img = cv2.imread(j)\r\n \r\n #check size\r\n set_size=(IMG_WIDTH,IMG_HEIGHT)\r\n img2=cv2.resize(img,set_size)\r\n\r\n #append label (set to i), and image img2 \r\n labels.append(i)\r\n images.append(img2)\r\n \r\n #return tuple(images,labels) \r\n return images,labels\r\n\r\ndef get_model():\r\n \"\"\"\r\n Returns a compiled convolutional neural network model. Assume that the\r\n `input_shape` of the first layer is `(IMG_WIDTH, IMG_HEIGHT, 3)`.\r\n The output layer should have `NUM_CATEGORIES` units, one for each category.\r\n \"\"\"\r\n model = tf.keras.models.Sequential([\r\n\r\n # Convolutional layer. 
Learn 60 filters using a 5x5 kernel\r\n tf.keras.layers.Conv2D(\r\n 60, (5, 5), activation=\"relu\", input_shape=(IMG_WIDTH, IMG_HEIGHT, 3)),\r\n \r\n #tf.keras.layers.Conv2D(\r\n # 60, (3, 3), activation=\"relu\"),\r\n\r\n # Max-pooling layer, using 2x2 pool size\r\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\r\n\r\n # Flatten units\r\n tf.keras.layers.Flatten(), \r\n\r\n # Add a hidden layer with dropout\r\n tf.keras.layers.Dense(NUM_CATEGORIES*10, activation=\"relu\"),\r\n tf.keras.layers.Dropout(0.25), \r\n \r\n # Add a hidden layer with dropout\r\n tf.keras.layers.Dense(NUM_CATEGORIES*10, activation=\"relu\"),\r\n tf.keras.layers.Dropout(0.25), \r\n\r\n # Add an output layer with output units for all num categories\r\n tf.keras.layers.Dense(NUM_CATEGORIES, activation=\"sigmoid\")\r\n ])\r\n \r\n # Train neural network\r\n opti=tf.keras.optimizers.RMSprop(learning_rate=4e-4) \r\n model.compile(optimizer=opti, loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\r\n\r\n return model\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n", "sub_path": "Week5_NeuralNetworks/traffic/traffic.py", "file_name": "traffic.py", "file_ext": "py", "file_size_in_byte": 4556, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "sys.argv", "line_number": 21, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 22, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 25, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.utils.to_categorical", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 28, "usage_type": "attribute"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 44, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 45, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 60, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 86, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 90, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 94, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 109, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 109, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 112, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 112, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.MaxPooling2D", "line_number": 119, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 119, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 122, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 122, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 125, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 125, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 126, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 126, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 129, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 129, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 130, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 130, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 133, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 133, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.optimizers.RMSprop", "line_number": 137, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 137, "usage_type": "attribute"}]} +{"seq_id": "304692450", "text": "import requests\nfrom lxml import etree\nfrom .util import xml_to_dict\n\ndef request_directory(client):\n # Create XML message\n root = etree.Element('DirectoryReq', nsmap={\n None: 'http://www.betaalvereniging.nl/iDx/messages/Merchant-Acquirer/1.0.0'\n })\n root.attrib['productID'] = 'NL:BVN:eMandatesCore:1.0'\n root.attrib['version'] = '1.0.0'\n root.append(client._timestamp())\n root.append(client._merchant(False))\n\n # Sign XML message and convert it to string\n data = client.sign_to_string(root)\n\n # Post signed XML message to directory endpoint\n r = requests.post(client.directory_url, data=data)\n\n if r.status_code >= 200 and r.status_code <= 399:\n # Parse XML response\n xml_result = etree.fromstring(r.text.encode('utf8'))\n\n # Verify response\n if not client.verify(xml_result):\n return {'is_error': True, 'error_code': 'SO100', 'error_message': 'Invalid response', 'error_detail': 'Signature verification failed'}\n\n # Convert XML object to dictionary\n result = xml_to_dict(xml_result)\n\n # Check for error\n if hasattr(result, 'error'):\n r = result['error']\n r['is_error'] = True\n return r\n\n # Parse response\n response = {\n 'is_error': False,\n 'timestamp': result['create_date_timestamp'],\n 'acquirer_id': result['acquirer']['acquirer_id'],\n 'countries': result['directory']['country']\n }\n\n if not isinstance(response['countries'], list):\n 
response['countries'] = [response['countries']]\n\n for country in response['countries']:\n country['name'] = country['country_names']\n del country['country_names']\n\n if not isinstance(country['issuer'], list):\n country['issuers'] = [country['issuer']]\n else:\n country['issuers'] = country['issuer']\n\n del country['issuer']\n\n return response\n else:\n # An HTTP error occurred\n return {'is_error': True, 'error_code': 'SO100', 'error_message': 'An unknown error occurred', 'error_detail': 'HTTP request returned error code'}\n", "sub_path": "sepa_netherlands/directory.py", "file_name": "directory.py", "file_ext": "py", "file_size_in_byte": 2200, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "lxml.etree.Element", "line_number": 7, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 7, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 19, "usage_type": "call"}, {"api_name": "lxml.etree.fromstring", "line_number": 23, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 23, "usage_type": "name"}, {"api_name": "util.xml_to_dict", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "43236913", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nControl Parser\n~~~~~~~~~~~~~~\nThis module handles parsing control statement, which add annotations and namespaces to the document.\n\nSee: https://wiki.openbel.org/display/BLD/Control+Records\n\"\"\"\n\nimport logging\nimport re\n\nfrom pyparsing import Suppress, MatchFirst\nfrom pyparsing import pyparsing_common as ppc\n\nfrom .baseparser import BaseParser, quote, delimitedSet, And, oneOf\nfrom .parse_exceptions import *\nfrom .utils import is_int\nfrom ..constants import BEL_KEYWORD_STATEMENT_GROUP, BEL_KEYWORD_CITATION, BEL_KEYWORD_EVIDENCE, BEL_KEYWORD_SUPPORT, \\\n BEL_KEYWORD_ALL, ANNOTATIONS\nfrom ..constants import CITATION_ENTRIES, EVIDENCE, CITATION_TYPES, BEL_KEYWORD_SET, BEL_KEYWORD_UNSET, CITATION\n\nlog = logging.getLogger('pybel')\n\n\nclass ControlParser(BaseParser):\n def __init__(self, annotation_dicts=None, annotation_expressions=None, citation_clearing=True):\n \"\"\"Builds parser for BEL valid_annotations statements\n\n :param annotation_dicts: A dictionary of {annotation: set of valid values} for parsing\n :type annotation_dicts: dict\n :param annotation_expressions: A dictionary of {annotation: regular expression string}\n :type annotation_expressions: dict\n :param citation_clearing: Should :code:`SET Citation` statements clear evidence and all annotations?\n :type citation_clearing: bool\n \"\"\"\n\n self.citation_clearing = citation_clearing\n\n self.valid_annotations = {} if annotation_dicts is None else annotation_dicts\n self.annotations_re = {} if annotation_expressions is None else annotation_expressions\n self.annotations_re_compiled = {k: re.compile(v) for k, v in self.annotations_re.items()}\n\n self.statement_group = None\n self.citation = {}\n self.evidence = None\n self.annotations = {}\n\n annotation_key = ppc.identifier('key').setParseAction(self.handle_annotation_key)\n\n self.set_statement_group = And([Suppress(BEL_KEYWORD_STATEMENT_GROUP), Suppress('='), quote('group')])\n self.set_statement_group.setParseAction(self.handle_set_statement_group)\n\n self.set_citation = And([Suppress(BEL_KEYWORD_CITATION), Suppress('='), delimitedSet('values')])\n self.set_citation.setParseAction(self.handle_set_citation)\n\n supporting_text_tags = oneOf([BEL_KEYWORD_EVIDENCE, BEL_KEYWORD_SUPPORT])\n 
self.set_evidence = And([Suppress(supporting_text_tags), Suppress('='), quote('value')])\n self.set_evidence.setParseAction(self.handle_set_evidence)\n\n set_command_prefix = And([annotation_key('key'), Suppress('=')])\n self.set_command = set_command_prefix + quote('value')\n self.set_command.setParseAction(self.handle_set_command)\n\n self.set_command_list = set_command_prefix + delimitedSet('values')\n self.set_command_list.setParseAction(self.handle_set_command_list)\n\n self.unset_command = annotation_key('key')\n self.unset_command.addParseAction(self.handle_unset_command)\n\n self.unset_evidence = supporting_text_tags(EVIDENCE)\n self.unset_evidence.setParseAction(self.handle_unset_evidence)\n\n self.unset_citation = Suppress(BEL_KEYWORD_CITATION)\n self.unset_citation.setParseAction(self.handle_unset_citation)\n\n self.unset_statement_group = Suppress(BEL_KEYWORD_STATEMENT_GROUP)\n self.unset_statement_group.setParseAction(self.handle_unset_statement_group)\n\n self.unset_list = delimitedSet('values')\n self.unset_list.setParseAction(self.handle_unset_list)\n\n self.unset_all = Suppress(BEL_KEYWORD_ALL)\n self.unset_all.setParseAction(self.handle_unset_all)\n\n set_tag = Suppress(BEL_KEYWORD_SET)\n unset_tag = Suppress(BEL_KEYWORD_UNSET)\n\n self.set_statements = set_tag + MatchFirst([\n self.set_statement_group,\n self.set_citation,\n self.set_evidence,\n self.set_command,\n self.set_command_list,\n ])\n\n self.unset_statements = unset_tag + MatchFirst([\n self.unset_all,\n self.unset_citation,\n self.unset_evidence,\n self.unset_statement_group,\n self.unset_command,\n self.unset_list\n ])\n\n self.language = self.set_statements | self.unset_statements\n\n BaseParser.__init__(self, self.language)\n\n def validate_annotation_key(self, key):\n if key not in self.valid_annotations and key not in self.annotations_re_compiled:\n raise UndefinedAnnotationWarning(key)\n\n def validate_value(self, key, value):\n if key in self.valid_annotations and value not in self.valid_annotations[key]:\n raise IllegalAnnotationValueWarning(value, key)\n elif key in self.annotations_re_compiled and not self.annotations_re_compiled[key].match(value):\n raise MissingAnnotationRegexWarning(value, key)\n\n def handle_annotation_key(self, s, l, tokens):\n \"\"\"Called on all annotation keys before parsing to validate that it's either enumerated or as a regex\"\"\"\n key = tokens['key']\n\n if self.citation_clearing and not self.citation:\n raise MissingCitationException(s)\n\n self.validate_annotation_key(key)\n return tokens\n\n def handle_set_statement_group(self, s, l, tokens):\n self.statement_group = tokens['group']\n return tokens\n\n def handle_set_citation(self, s, l, tokens):\n self.clear_citation()\n\n values = tokens['values']\n\n if not (3 <= len(values) <= 6):\n raise InvalidCitationException(s)\n\n if values[0] not in CITATION_TYPES:\n raise InvalidCitationType(values[0])\n\n if values[0] == 'PubMed' and not is_int(values[2]):\n raise InvalidPubMedIdentifierWarning(values[2])\n\n self.citation = dict(zip(CITATION_ENTRIES, values))\n\n return tokens\n\n def handle_set_evidence(self, s, l, tokens):\n self.evidence = tokens['value']\n return tokens\n\n def handle_set_command(self, s, l, tokens):\n key = tokens['key']\n value = tokens['value']\n\n self.validate_value(key, value)\n\n self.annotations[key] = value\n return tokens\n\n def handle_set_command_list(self, s, l, tokens):\n key = tokens['key']\n values = tokens['values']\n\n for value in values:\n self.validate_value(key, value)\n\n 
self.annotations[key] = set(values)\n return tokens\n\n def handle_unset_statement_group(self, s, l, tokens):\n if self.statement_group is None:\n raise MissingAnnotationKeyWarning(BEL_KEYWORD_STATEMENT_GROUP)\n self.statement_group = None\n return tokens\n\n def handle_unset_citation(self, s, l, tokens):\n if not self.citation:\n raise MissingAnnotationKeyWarning(BEL_KEYWORD_CITATION)\n\n self.clear_citation()\n\n return tokens\n\n def handle_unset_evidence(self, s, l, tokens):\n if self.evidence is None:\n raise MissingAnnotationKeyWarning(tokens[EVIDENCE])\n self.evidence = None\n return tokens\n\n def validate_unset_command(self, key):\n if key not in self.annotations:\n raise MissingAnnotationKeyWarning(key)\n\n def handle_unset_command(self, s, l, tokens):\n key = tokens['key']\n self.validate_unset_command(key)\n del self.annotations[key]\n return tokens\n\n def handle_unset_list(self, s, l, tokens):\n for key in tokens['values']:\n if key in {BEL_KEYWORD_EVIDENCE, BEL_KEYWORD_SUPPORT}:\n self.evidence = None\n else:\n self.validate_unset_command(key)\n del self.annotations[key]\n\n return tokens\n\n def handle_unset_all(self, s, l, tokens):\n self.clear()\n return tokens\n\n def get_annotations(self):\n \"\"\"\n\n :return: The currently stored BEL annotations\n :rtype: dict\n \"\"\"\n return {\n EVIDENCE: self.evidence,\n CITATION: self.citation.copy(),\n ANNOTATIONS: self.annotations.copy()\n }\n\n def clear_citation(self):\n self.citation.clear()\n\n if self.citation_clearing:\n self.evidence = None\n self.annotations.clear()\n\n def clear(self):\n \"\"\"Clears the statement_group, citation, evidence, and annotations\"\"\"\n self.statement_group = None\n self.citation.clear()\n self.evidence = None\n self.annotations.clear()\n", "sub_path": "src/pybel/parser/parse_control.py", "file_name": "parse_control.py", "file_ext": "py", "file_size_in_byte": 8538, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "logging.getLogger", "line_number": 24, "usage_type": "call"}, {"api_name": "baseparser.BaseParser", "line_number": 27, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 43, "usage_type": "call"}, {"api_name": "pyparsing.pyparsing_common.identifier", "line_number": 50, "usage_type": "call"}, {"api_name": "pyparsing.pyparsing_common", "line_number": 50, "usage_type": "name"}, {"api_name": "baseparser.And", "line_number": 52, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 52, "usage_type": "call"}, {"api_name": "constants.BEL_KEYWORD_STATEMENT_GROUP", "line_number": 52, "usage_type": "argument"}, {"api_name": "baseparser.quote", "line_number": 52, "usage_type": "call"}, {"api_name": "baseparser.And", "line_number": 55, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 55, "usage_type": "call"}, {"api_name": "constants.BEL_KEYWORD_CITATION", "line_number": 55, "usage_type": "argument"}, {"api_name": "baseparser.delimitedSet", "line_number": 55, "usage_type": "call"}, {"api_name": "baseparser.oneOf", "line_number": 58, "usage_type": "call"}, {"api_name": "constants.BEL_KEYWORD_EVIDENCE", "line_number": 58, "usage_type": "name"}, {"api_name": "constants.BEL_KEYWORD_SUPPORT", "line_number": 58, "usage_type": "name"}, {"api_name": "baseparser.And", "line_number": 59, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 59, "usage_type": "call"}, {"api_name": "baseparser.quote", "line_number": 59, "usage_type": "call"}, {"api_name": 
"baseparser.And", "line_number": 62, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 62, "usage_type": "call"}, {"api_name": "baseparser.quote", "line_number": 63, "usage_type": "call"}, {"api_name": "baseparser.delimitedSet", "line_number": 66, "usage_type": "call"}, {"api_name": "constants.EVIDENCE", "line_number": 72, "usage_type": "argument"}, {"api_name": "pyparsing.Suppress", "line_number": 75, "usage_type": "call"}, {"api_name": "constants.BEL_KEYWORD_CITATION", "line_number": 75, "usage_type": "argument"}, {"api_name": "pyparsing.Suppress", "line_number": 78, "usage_type": "call"}, {"api_name": "constants.BEL_KEYWORD_STATEMENT_GROUP", "line_number": 78, "usage_type": "argument"}, {"api_name": "baseparser.delimitedSet", "line_number": 81, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 84, "usage_type": "call"}, {"api_name": "constants.BEL_KEYWORD_ALL", "line_number": 84, "usage_type": "argument"}, {"api_name": "pyparsing.Suppress", "line_number": 87, "usage_type": "call"}, {"api_name": "constants.BEL_KEYWORD_SET", "line_number": 87, "usage_type": "argument"}, {"api_name": "pyparsing.Suppress", "line_number": 88, "usage_type": "call"}, {"api_name": "constants.BEL_KEYWORD_UNSET", "line_number": 88, "usage_type": "argument"}, {"api_name": "pyparsing.MatchFirst", "line_number": 90, "usage_type": "call"}, {"api_name": "pyparsing.MatchFirst", "line_number": 98, "usage_type": "call"}, {"api_name": "baseparser.BaseParser.__init__", "line_number": 109, "usage_type": "call"}, {"api_name": "baseparser.BaseParser", "line_number": 109, "usage_type": "name"}, {"api_name": "constants.CITATION_TYPES", "line_number": 143, "usage_type": "name"}, {"api_name": "utils.is_int", "line_number": 146, "usage_type": "call"}, {"api_name": "constants.CITATION_ENTRIES", "line_number": 149, "usage_type": "argument"}, {"api_name": "constants.BEL_KEYWORD_STATEMENT_GROUP", "line_number": 178, "usage_type": "argument"}, {"api_name": "constants.BEL_KEYWORD_CITATION", "line_number": 184, "usage_type": "argument"}, {"api_name": "constants.EVIDENCE", "line_number": 192, "usage_type": "name"}, {"api_name": "constants.BEL_KEYWORD_EVIDENCE", "line_number": 208, "usage_type": "name"}, {"api_name": "constants.BEL_KEYWORD_SUPPORT", "line_number": 208, "usage_type": "name"}, {"api_name": "constants.EVIDENCE", "line_number": 227, "usage_type": "name"}, {"api_name": "constants.CITATION", "line_number": 228, "usage_type": "name"}, {"api_name": "constants.ANNOTATIONS", "line_number": 229, "usage_type": "name"}]} +{"seq_id": "585353658", "text": "import asyncio\nimport logging\n\nfrom django.utils import timezone\n\nfrom django_hpc_job_controller.client.core.messaging.message import Message\nfrom django_hpc_job_controller.client.scheduler.status import JobStatus\nfrom django_hpc_job_controller.server.utils import get_job_submission_lock, get_job_model_instance\n\n\n# Get the logger\nlogger = logging.getLogger(__name__)\n\n\nasync def handle_message(sock, token, queue, message):\n \"\"\"\n Handles an incoming message from a non file websocket\n\n :param sock: The socket that the message was received from\n :param token: The token used for this websocket connection\n :param queue: The queue to send messages on\n :param message: The raw message received\n :return: Nothing\n \"\"\"\n # Convert the raw message to a Message object\n msg = Message(data=message)\n\n # Get the message id\n msg_id = msg.pop_uint()\n\n if msg_id == Message.SUBMIT_JOB:\n # Acquire the job submission lock\n with 
get_job_submission_lock():\n # Clean up the django connection\n from django.db import connection\n connection.close()\n\n # Look up the job\n job = get_job_model_instance().objects.get(id=msg.pop_uint(), job_status=JobStatus.SUBMITTING)\n\n # Mark the job submitted\n job.job_status = JobStatus.SUBMITTED\n\n # Set the submitted time\n job.job_submitted_time = timezone.now()\n\n # Save the job\n job.save()\n\n elif msg_id == Message.UPDATE_JOB:\n # Clean up the django connection\n from django.db import connection\n connection.close()\n \n # Look up the job we are updating the status of\n job = get_job_model_instance().objects.get(id=msg.pop_uint())\n\n # Check that the jobs cluster matches the tokens cluster\n if job.cluster != token.cluster:\n logger.info(\"A different cluster ({} (id: {})) tried to update a job ({} (id: {})) it does not own!\".format(\n str(token.cluster), token.cluster.id, str(job), job.id\n ))\n return\n\n # Set the new status\n job.job_status = msg.pop_uint()\n\n # Set the extra details if there are any\n job.job_details = (job.job_details or '') + \"{}: New status: {}\\n{}\\n\\n\".format(\n timezone.now(), job.job_status, msg.pop_string() or 'No detail')\n\n # Check if we need to update various time stamps\n if job.job_status == JobStatus.QUEUED:\n job.job_queued_time = timezone.now()\n\n if job.job_status == JobStatus.RUNNING:\n if not job.job_queued_time:\n job.job_queued_time = timezone.now()\n job.job_running_time = timezone.now()\n\n if job.job_status in [JobStatus.CANCELLED, JobStatus.ERROR, JobStatus.WALL_TIME_EXCEEDED,\n JobStatus.OUT_OF_MEMORY, JobStatus.COMPLETED]:\n if not job.job_queued_time:\n job.job_queued_time = timezone.now()\n if not job.job_running_time:\n job.job_running_time = timezone.now()\n job.job_finished_time = timezone.now()\n\n # Save the job\n job.save()\n\n elif msg_id == Message.TRANSMIT_ASSURED_RESPONSE_WEBSOCKET_MESSAGE:\n # Create the socket\n from django_hpc_job_controller.server.settings import HPC_IPC_UNIX_SOCKET\n reader, writer = await asyncio.open_unix_connection(HPC_IPC_UNIX_SOCKET + \".\" + msg.pop_string())\n\n # Send the encapsulated message\n data = msg.pop_bytes()\n from django_hpc_job_controller.server.server import send_message_writer\n send_message_writer(data, writer, True)\n", "sub_path": "server/cluster_manager.py", "file_name": "cluster_manager.py", "file_ext": "py", "file_size_in_byte": 3680, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "django_hpc_job_controller.client.core.messaging.message.Message", "line_number": 26, "usage_type": "call"}, {"api_name": "django_hpc_job_controller.client.core.messaging.message.Message.SUBMIT_JOB", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django_hpc_job_controller.client.core.messaging.message.Message", "line_number": 31, "usage_type": "name"}, {"api_name": "django_hpc_job_controller.server.utils.get_job_submission_lock", "line_number": 33, "usage_type": "call"}, {"api_name": "django.db.connection.close", "line_number": 36, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 36, "usage_type": "name"}, {"api_name": "django_hpc_job_controller.server.utils.get_job_model_instance", "line_number": 39, "usage_type": "call"}, {"api_name": "django_hpc_job_controller.client.scheduler.status.JobStatus.SUBMITTING", "line_number": 39, "usage_type": "attribute"}, {"api_name": 
"django_hpc_job_controller.client.scheduler.status.JobStatus", "line_number": 39, "usage_type": "name"}, {"api_name": "django_hpc_job_controller.client.scheduler.status.JobStatus.SUBMITTED", "line_number": 42, "usage_type": "attribute"}, {"api_name": "django_hpc_job_controller.client.scheduler.status.JobStatus", "line_number": 42, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 45, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 45, "usage_type": "name"}, {"api_name": "django_hpc_job_controller.client.core.messaging.message.Message.UPDATE_JOB", "line_number": 50, "usage_type": "attribute"}, {"api_name": "django_hpc_job_controller.client.core.messaging.message.Message", "line_number": 50, "usage_type": "name"}, {"api_name": "django.db.connection.close", "line_number": 53, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 53, "usage_type": "name"}, {"api_name": "django_hpc_job_controller.server.utils.get_job_model_instance", "line_number": 56, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 70, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 70, "usage_type": "name"}, {"api_name": "django_hpc_job_controller.client.scheduler.status.JobStatus.QUEUED", "line_number": 73, "usage_type": "attribute"}, {"api_name": "django_hpc_job_controller.client.scheduler.status.JobStatus", "line_number": 73, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 74, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 74, "usage_type": "name"}, {"api_name": "django_hpc_job_controller.client.scheduler.status.JobStatus.RUNNING", "line_number": 76, "usage_type": "attribute"}, {"api_name": "django_hpc_job_controller.client.scheduler.status.JobStatus", "line_number": 76, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 78, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 78, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 79, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 79, "usage_type": "name"}, {"api_name": "django_hpc_job_controller.client.scheduler.status.JobStatus.CANCELLED", "line_number": 81, "usage_type": "attribute"}, {"api_name": "django_hpc_job_controller.client.scheduler.status.JobStatus", "line_number": 81, "usage_type": "name"}, {"api_name": "django_hpc_job_controller.client.scheduler.status.JobStatus.ERROR", "line_number": 81, "usage_type": "attribute"}, {"api_name": "django_hpc_job_controller.client.scheduler.status.JobStatus.WALL_TIME_EXCEEDED", "line_number": 81, "usage_type": "attribute"}, {"api_name": "django_hpc_job_controller.client.scheduler.status.JobStatus.OUT_OF_MEMORY", "line_number": 82, "usage_type": "attribute"}, {"api_name": "django_hpc_job_controller.client.scheduler.status.JobStatus", "line_number": 82, "usage_type": "name"}, {"api_name": "django_hpc_job_controller.client.scheduler.status.JobStatus.COMPLETED", "line_number": 82, "usage_type": "attribute"}, {"api_name": "django.utils.timezone.now", "line_number": 84, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 84, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 86, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 86, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 87, "usage_type": "call"}, {"api_name": 
"django.utils.timezone", "line_number": 87, "usage_type": "name"}, {"api_name": "django_hpc_job_controller.client.core.messaging.message.Message.TRANSMIT_ASSURED_RESPONSE_WEBSOCKET_MESSAGE", "line_number": 92, "usage_type": "attribute"}, {"api_name": "django_hpc_job_controller.client.core.messaging.message.Message", "line_number": 92, "usage_type": "name"}, {"api_name": "asyncio.open_unix_connection", "line_number": 95, "usage_type": "call"}, {"api_name": "django_hpc_job_controller.server.settings.HPC_IPC_UNIX_SOCKET", "line_number": 95, "usage_type": "name"}, {"api_name": "django_hpc_job_controller.server.server.send_message_writer", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "598464006", "text": "import urllib.request\r\nimport json\r\n\r\nfrom file_names import *\r\nfrom file_utils import append_to_file\r\n\r\nif __name__ == '__main__':\r\n print('Loading list of groups to file \\'' + GROUPS_FILE_NAME + '\\'')\r\n limit = 100\r\n offset = 0\r\n counter = 0\r\n while True:\r\n url = \"http://api.rozklad.org.ua/v2/groups/?filter={'limit':\" + str(limit) + \",'offset':\" + str(offset) + \"}\"\r\n\r\n req = urllib.request.Request(url)\r\n resp = urllib.request.urlopen(req)\r\n resp_data = resp.read()\r\n\r\n data = json.loads(resp_data.decode('utf-8'))\r\n data = data['data']\r\n\r\n if data is None: break\r\n\r\n for group in data:\r\n group_full_name = group['group_full_name']\r\n group_id = group['group_id']\r\n counter += 1\r\n try:\r\n print(\"{}) {}\".format(counter, group_full_name))\r\n except UnicodeEncodeError:\r\n print(str(counter) + \")\\t\" + \"UnicodeEncodeError\")\r\n\r\n append_to_file(GROUPS_FILE_NAME, (str(group_id) + '.' + group_full_name + '\\n'))\r\n\r\n offset += limit\r\n", "sub_path": "DBfilling/create_list_of_groups.py", "file_name": "create_list_of_groups.py", "file_ext": "py", "file_size_in_byte": 1103, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "urllib.request.request.Request", "line_number": 15, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 15, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 15, "usage_type": "name"}, {"api_name": "urllib.request.request.urlopen", "line_number": 16, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 16, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 16, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 19, "usage_type": "call"}, {"api_name": "file_utils.append_to_file", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "49950942", "text": "import mysql.connector\nfrom collections import deque\nimport app\nimport os\nfrom datetime import datetime\nfrom itertools import cycle\n\n\nclass Database(object):\n def __init__(self):\n self.user = \"root\"\n self.pwd = \"mysql\"\n self.db = None\n self.createConnection()\n\n def createConnection(self):\n self.db = mysql.connector.connect(\n host=self.getHost(),\n user=self.user,\n passwd=self.pwd,\n database=\"proyecto\",\n port=3306\n )\n # app.logger().info(\"Connected!\")\n\n def getHost(self):\n docker = os.environ.get('DOCKER', False)\n if docker:\n return \"db\"\n else:\n return \"localhost\"\n\n def checkConnection(self):\n try:\n self.db.ping(reconnect=True)\n except Exception as err:\n app.logger().error(\"error on ping db: reconnect failed-- {}\".format(err))\n\n def createCursor(self, cursor_type=None):\n self.checkConnection()\n return 
self.db.cursor(cursor_type)\n\n # Busca info de los robots\n def getAllRobots(self):\n try:\n cursor = self.createCursor()\n cursor.execute(\"select * from robot WHERE estado <> 'ELIMINADO'\")\n row_headers = [x[0] for x in cursor.description] # this will extract row headers\n rv = cursor.fetchall()\n json_data = []\n for result in rv:\n json_data.append(dict(zip(row_headers, result)))\n cursor.close()\n return json_data, True\n except Exception as err:\n app.logger().exception(\"Error en select getallrobots -- {}\".format(err))\n return None, False\n\n def getRobot(self, id_r):\n try:\n cursor = self.createCursor()\n cursor.execute(\"SELECT * FROM robot WHERE id = %s AND estado <> 'ELIMINADO'\", (id_r,))\n res = cursor.fetchone()\n cursor.close()\n if not res:\n return None, True\n res = {cursor.description[pos][0]: value for pos, value in enumerate(res)}\n return res, True\n except Exception as err:\n app.logger().exception(\"Error en select getRobot -- {}\".format(err))\n return None, False\n\n def getRobotLoc(self, id_r):\n try:\n cursor = self.createCursor()\n cursor.execute(\"SELECT loc1, loc2 FROM robot WHERE id = %s AND estado <> 'ELIMINADO'\", (id_r,))\n pos = cursor.fetchone()\n cursor.close()\n if pos:\n pos = list(pos)\n return pos, True\n else:\n return None, True\n except Exception as err:\n app.logger().exception(\"Error en select getRobotLoc -- {}\".format(err))\n return None, False\n\n def insertRobot(self, pos):\n id_r = 0\n cursor = self.createCursor()\n try:\n sql = \"INSERT INTO robot (loc1, loc2, estado) VALUES (%s, %s, UCASE(%s))\"\n val = (pos[0], pos[1], 'LIBRE',)\n cursor.execute(sql, val)\n id_r = cursor.lastrowid\n self.db.commit()\n except Exception as err:\n app.logger().exception(\"Error en INSERT insertRobot -- {}\".format(err))\n return None, False\n finally:\n cursor.close()\n return id_r, True\n\n def deleteRobot(self, id_r):\n try:\n cursor = self.createCursor()\n sql = \"UPDATE robot SET estado = 'ELIMINADO' WHERE id = %s\"\n cursor.execute(sql, (id_r,))\n self.db.commit()\n cursor.close()\n return True\n except Exception as err:\n app.logger().exception(\"Error en DELETE eliminarRobot -- {}\".format(err))\n self.db.rollback()\n return False\n\n def updateRobot(self, id_r, estado):\n try:\n cursor = self.createCursor()\n if id_r is not None:\n sql = \"UPDATE robot SET estado = %s WHERE id = %s AND estado <> 'ELIMINADO'\"\n cursor.execute(sql, (estado, id_r))\n else:\n sql = \"UPDATE robot SET estado = %s WHERE estado <> 'ELIMINADO'\"\n cursor.execute(sql, (estado, ))\n self.db.commit()\n cursor.close()\n return True\n except Exception as err:\n app.logger().exception(\"Error en UPDATE updateRobot -- {}\".format(err))\n self.db.rollback()\n return False\n\n def updatePid(self, pid, id_r):\n sql = \"UPDATE robot SET pid = %s WHERE id = %s AND estado <> 'ELIMINADO'\"\n val = (pid, id_r)\n cursor = self.createCursor()\n try:\n cursor.execute(sql, val)\n self.db.commit()\n except Exception as err:\n app.logger().exception(\"Error en INSERT insertRobot -- {}\".format(err))\n return False\n finally:\n cursor.close()\n return True\n\n def getPid(self, id_r):\n cursor = self.createCursor()\n try:\n sql = \"SELECT pid, estado FROM robot WHERE id = %s AND estado <> 'ELIMINADO'\"\n val = (str(id_r),)\n cursor.execute(sql, val)\n res = cursor.fetchone()\n cursor.close()\n return res, True\n except Exception as err:\n app.logger().exception(\"Error en select getRobotLoc -- {}\".format(err))\n return None, False\n\n def updateRobotPath(self, path, id_r):\n sql = 
\"UPDATE robot SET camino = %s WHERE id = %s AND estado <> 'ELIMINADO'\"\n val = (path, id_r)\n cursor = self.createCursor()\n try:\n cursor.execute(sql, val)\n self.db.commit()\n except Exception as err:\n app.logger().exception(\"Error en INSERT updateRobotPath -- {}\".format(err))\n return False\n finally:\n cursor.close()\n return True\n\n def updaterobotpos(self, pos, id_r):\n sql = \"UPDATE robot SET actual = %s WHERE id = %s AND estado <> 'ELIMINADO'\"\n val = (pos, id_r)\n try:\n cursor = self.createCursor()\n cursor.execute(sql, val)\n self.db.commit()\n cursor.close()\n except Exception as err:\n app.logger().exception(\"Error en INSERT updateRobotPath -- {}\".format(err))\n return False\n return True\n\n def towerAvailable(self, id_t):\n try:\n cursor = self.createCursor()\n sql = \"SELECT estado from torre WHERE id = %s\"\n val = (id_t,)\n cursor.execute(sql, val)\n result = cursor.fetchone()\n cursor.close()\n if result[0] == \"LIBRE\":\n return True\n else:\n return False\n except Exception as err:\n app.logger().exception(\"Error en select getRobotLoc -- {}\".format(err))\n return False\n\n def moveTowers(self, movments):\n for i in movments: # Por cada movimiento lo traduzco en la base de datos y en la matriz.\n print(\"movimiento\")\n return {\n \"Status\": 200,\n \"Message\": \"to bien\"\n }\n\n def getTowerLoc(self, id_r):\n try:\n cursor = self.createCursor()\n sql = \"SELECT T.loc1, T.loc2 FROM torre T INNER JOIN articulo a on T.id = a.id_torre INNER JOIN pedidos p on a.id = p.id_articulo WHERE p.id_robot = %s AND p.estado = 'LISTO'\"\n val = (id_r,)\n cursor.execute(sql, val)\n a = cursor.fetchall()\n if not a:\n cursor.execute(\"SELECT T.loc1, T.loc2 FROM torre T INNER JOIN pedidos p on T.id = p.id_torre WHERE p.id_robot = %s AND p.estado = 'LISTO'\", (id_r,))\n a = cursor.fetchall()\n cursor.close()\n if a:\n return list(a[0]), True\n else:\n return None, True\n except Exception as err:\n app.logger().exception(\"Error en select getTowerLoc -- {}\".format(err))\n return None, False\n\n def getAllTowers(self):\n try:\n cursor = self.createCursor()\n cursor.execute(\"select * from torre WHERE estado <> 'ELIMINADO'\")\n row_headers = [x[0] for x in cursor.description] # this will extract row headers\n rv = cursor.fetchall()\n json_data = []\n for result in rv:\n json_data.append(dict(zip(row_headers, result)))\n cursor.close()\n return json_data, True\n except Exception as err:\n app.logger().exception(\"Error en select getAllTowers -- {}\".format(err))\n return None, False\n\n def insertTowers(self, matrix):\n try:\n cursor = self.db.cursor(buffered=True)\n for fila in range(len(matrix)):\n for columna in range(len(matrix[fila])):\n if matrix[fila][columna] == 1:\n sql = \"INSERT INTO torre (loc1, loc2, estado) VALUES (%s, %s, %s)\"\n val = (fila, columna, 'LIBRE')\n cursor.execute(sql, val)\n elif matrix[fila][columna] == 2:\n sql = \"INSERT INTO plataforma_origen (loc1, loc2, estado) VALUES (%s, %s, %s)\"\n val = (fila, columna, 'LIBRE')\n cursor.execute(sql, val)\n self.db.commit()\n cursor.execute(\"SELECT id from plataforma_origen \")\n r = cursor.fetchall()\n with open('platforms.txt', 'w') as f:\n for i in r:\n f.write(f'{i[0]}\\n')\n cursor.close()\n return True\n except Exception as err:\n app.logger().exception(\"Error en INSERT insertTowers -- {}\".format(err))\n return False\n\n def getPlatfoms(self):\n try:\n cursor = self.createCursor()\n cursor.execute(\"select * from plataforma_origen WHERE estado <> 'ELIMINADO'\")\n row_headers = [x[0] for x in 
cursor.description] # this will extract row headers\n            rv = cursor.fetchall()\n            json_data = []\n            for result in rv:\n                json_data.append(dict(zip(row_headers, result)))\n            cursor.close()\n            return json_data, True\n        except Exception as err:\n            app.logger().exception(\"Error en select getPlatforms -- {}\".format(err))\n            return None, False\n\n    def getPlatformLoc(self, id_r):\n        try:\n            cursor = self.createCursor()\n            sql = \"SELECT P.loc1, P.loc2 FROM plataforma_origen P INNER JOIN pedidos p2 on P.id = p2.id_plataforma WHERE p2.id_robot = %s AND p2.estado = 'ENPROGRESO'\"\n            val = (id_r,)\n            cursor.execute(sql, val)\n            a = cursor.fetchall()\n            cursor.close()\n            if a:\n                return list(a[0]), True\n            else:\n                return None, True\n        except Exception as err:\n            app.logger().exception(\"Error en SELECT getPlatformLoc -- {}\".format(err))\n            return None, False\n\n    def getPlatformState(self, id_r):\n        try:\n            cursor = self.createCursor()\n            sql = \"SELECT P.estado FROM plataforma_origen P INNER JOIN pedidos p2 on P.id = p2.id_plataforma WHERE id_robot = %s AND p2.estado != 'FINALIZADO'\"\n            val = (id_r,)\n            cursor.execute(sql, val)\n            res = cursor.fetchone()\n            cursor.close()\n            return res, True\n        except Exception as err:\n            app.logger().exception(\"Error en SELECT getPlatformState -- {}\".format(err))\n            return None, False\n\n    def leavePlatform(self, id_plat, id_pedido):\n        try:\n            cursor = self.createCursor()\n            sql = \"UPDATE pedidos SET estado = 'LISTO' where id = %s\"\n            cursor.execute(sql, (id_pedido,))\n            self.db.commit()\n            sql = \"SELECT id FROM pedidos WHERE estado = 'EN TERMINAL' AND id_plataforma = %s\"\n            cursor.execute(sql, (id_plat,))\n            pedidos = cursor.fetchall()\n            cursor.close()\n            if pedidos:\n                return True, [i[0] for i in pedidos]\n            else:\n                return True, None\n        except Exception as err:\n            app.logger().exception(\"Error en UPDATE leavePlatform -- {}\".format(err))\n            return False, None\n\n    def leavePlatform2(self, id_pedido, id_plat):\n        try:\n            cursor = self.createCursor()\n            sql = \"UPDATE pedidos SET estado = 'LISTO' where id= %s AND estado = %s\"\n            val = (id_pedido, 'EN TERMINAL')\n            cursor.execute(sql, val)\n            self.db.commit()\n            # Si no hay rows afectadas por el update significa que el pedido no estaba en la plataforma porque no tenia estado EN TERMINAL\n            if cursor.rowcount == 0:\n                return True, None, None\n            sql = \"SELECT id FROM pedidos WHERE estado = 'EN TERMINAL' AND id_plataforma = %s\"\n            cursor.execute(sql, (id_plat,))\n            pedidos = cursor.fetchall()\n            cursor.close()\n            if pedidos:\n                return True, True, [i[0] for i in pedidos]\n            return True, True, None\n        except Exception as err:\n            app.logger().exception(\"Error en UPDATE leavePlatform2 -- {}\".format(err))\n            return False, False, None\n\n    def platformAvailable(self, id_p):\n        try:\n            cursor = self.createCursor()\n            sql = \"SELECT id, estado from plataforma_origen WHERE id = %s\"\n            val = (id_p,)\n            cursor.execute(sql, val)\n            result = cursor.fetchone()\n            if result[1] == \"LIBRE\":\n                return True\n            else:\n                return False\n        except Exception as err:\n            app.logger().exception(\"Error en SELECT platformAvailable -- {}\".format(err))\n            return False\n\n    # Cuando el robot llega a la plataforma\n    def arrivedPlatform(self, id_r):\n        try:\n            cursor = self.createCursor()\n            sql = \"UPDATE pedidos SET estado = 'EN TERMINAL' WHERE id_robot = %s AND estado <> 'FINALIZADO' AND estado <> 'ELIMINADO'\"\n            val = (id_r,)\n            cursor.execute(sql, val)\n            self.db.commit()\n            cursor.close()\n            return True\n        except Exception as err:\n            self.db.rollback()\n            app.logger().exception(\"Error en SELECT arrivedPlatform -- 
{}\".format(err))\n return False\n\n # Cuando el robot se va de la plataforma\n def leavedPlatform(self, id_r):\n try:\n # Marco el pedido como actualizado\n cursor = self.createCursor()\n sql = \"UPDATE pedidos SET estado = 'FINALIZADO' WHERE id_robot = %s AND estado <> 'ELIMINADO'\"\n cursor.execute(sql, (id_r,))\n # Marco el articulo como entregado\n sql = \"UPDATE articulo a INNER JOIN pedidos p ON a.id = p.id_articulo SET a.estado = 'ENTREGADO' WHERE p.id_robot = %s AND a.estado = 'OCUPADO'\"\n cursor.execute(sql, (id_r,))\n # Marco la plataforma de origen como libre\n sql = \"UPDATE plataforma_origen INNER JOIN pedidos p on plataforma_origen.id = p.id_plataforma SET \" \\\n \"plataforma_origen.estado = 'LIBRE' WHERE p.id_robot = %s \"\n cursor.execute(sql, (id_r,))\n self.db.commit()\n cursor.close()\n return True\n except Exception as err:\n self.db.rollback()\n app.logger().exception(\"Error en UPDATE leavedPlatform -- {}\".format(err))\n return False\n\n # Cuando el robot deja la torre donde estaba\n def finishedJob(self, id_r):\n try:\n cursor = self.createCursor()\n sql = \"UPDATE robot SET estado = 'LIBRE' where id= %s AND estado <> 'ELIMINADO'\"\n val = (id_r,)\n cursor.execute(sql, val)\n sql = \"UPDATE torre INNER JOIN articulo a on torre.id = a.id_torre INNER JOIN pedidos p on a.id = \" \\\n \"p.id_articulo SET torre.estado = 'LIBRE' WHERE p.id_robot = %s AND p.estado = 'FINALIZADO' \"\n cursor.execute(sql, val)\n if cursor.rowcount == 0:\n sql = \"UPDATE torre t INNER JOIN pedidos p on t.id = p.id_torre SET t.estado = 'LIBRE' WHERE p.id_robot = %s AND p.estado = 'FINALIZADO'\"\n cursor.execute(sql, (id_r,))\n self.db.commit()\n cursor.close()\n return True\n except Exception as err:\n self.db.rollback()\n app.logger().exception(\"Error en UPDATE finishedJob -- {}\".format(err))\n return False\n\n def getPedidos(self, id=None, id_orden_compra=None, id_articulo=None, id_plataforma=None, id_robot=None,\n estado=None):\n try:\n cursor = self.createCursor()\n sql = \"SELECT p.id , p.id_orden_compra, p.id_articulo, p2.name, p.estado, p.id_robot, p.id_torre 'id_torre1', a.id_torre FROM pedidos as p LEFT JOIN \" \\\n \"articulo a on p.id_articulo = a.id LEFT JOIN producto p2 on a.id_producto = p2.id\"\n where = []\n params = {}\n if id is not None:\n where.append(\"p.id = %(id)s\")\n params['id'] = id\n if id_orden_compra is not None:\n where.append(\"id_orden_compra = %(id_orden_compra)s\")\n params['id_orden_compra'] = id_orden_compra\n if id_articulo is not None:\n where.append(\"id_articulo = %(id_articulo)s\")\n params['id_articulo'] = id_articulo\n if id_plataforma is not None:\n where.append(\"id_plataforma = %(id_plataforma)s\")\n params['id_plataforma'] = id_plataforma\n if id_robot is not None:\n where.append(\"id_robot = %(id_robot)s\")\n params['id_robot'] = id_robot\n if estado is not None:\n where.append(\"p.estado = %(estado)s\")\n params['estado'] = estado\n where.append(\"p.estado <> 'ELIMINADO'\")\n if where:\n sql = '{} WHERE {}'.format(sql, ' AND '.join(where))\n cursor.execute(sql, params)\n row_headers = [x[0] for x in cursor.description]\n rv = cursor.fetchall()\n arts = []\n for result in rv:\n arts.append(dict(zip(row_headers, result)))\n cursor.close()\n return arts, True\n except Exception as err:\n app.logger().exception(\"Error en SELECT getPedidos -- {}\".format(err))\n return None, False\n\n def insertPedido(self, codorder, codart):\n platforms_file = \"platforms.txt\"\n try:\n cursor = self.createCursor()\n # Verifico que ninguno de los articulos 
solicitados no haya sido ocupado\n            id_list = ','.join(['%s'] * len(codart))\n            cursor.execute(\"SELECT id FROM articulo WHERE id IN (%s) AND estado <> 'LIBRE'\" % id_list, tuple(codart))\n            articulos_ocupados = cursor.fetchall()\n            if not articulos_ocupados:\n                with open(platforms_file, 'r') as f:\n                    platforms = deque(f.read().splitlines())\n                for art in codart:\n                    sql = \"INSERT INTO pedidos (id_orden_compra, id_articulo, id_plataforma, estado, fecha_creacion) VALUES (%s, %s, %s, %s, %s)\"\n                    val = (codorder, art, platforms[0], \"ENCOLADO\", datetime.now())\n                    cursor.execute(sql, val)\n                    sql = \"UPDATE articulo SET estado = 'OCUPADO' WHERE id = %s\"\n                    cursor.execute(sql, (art,))\n                self.db.commit()\n            else:\n                return articulos_ocupados, True\n            cursor.close()\n        except Exception as err:\n            with open(platforms_file, 'w') as f: # Guardo el archivo como estaba antes porque fallo el insert\n                for p in platforms:\n                    f.write(\"%s\\n\" % p)\n            self.db.rollback()\n            app.logger().exception(\"Error en INSERT insertPedido -- {}\".format(err))\n            return art, False\n        platforms.rotate(-1) # Roto el archivo Round Robin y lo guardo\n        with open(platforms_file, 'w') as f:\n            for s in platforms:\n                f.write(\"%s\\n\" % s)\n        return None, True\n\n    def insertPedidoTorre(self, id_plat, id_torre):\n        try:\n            cursor = self.createCursor()\n            sql = \"INSERT INTO pedidos (id_orden_compra, id_articulo, id_plataforma, estado, id_torre, fecha_creacion) VALUES (%s, %s, %s, %s, %s, %s)\"\n            val = (None, None, id_plat, \"ENCOLADO\", id_torre, datetime.now())\n            cursor.execute(sql, val)\n            self.db.commit()\n            return True\n        except Exception as err:\n            self.db.rollback()\n            print(\"error\")\n            app.logger().exception(\"Error en INSERT insertPedidoTorre -- {}\".format(err))\n            return False\n\n    def deletePedido(self, id):\n        try:\n            cursor = self.createCursor()\n            sql = \"SELECT * FROM pedidos WHERE id = %s AND estado <> 'ENCOLADO' AND estado <> 'FINALIZADO'\"\n            cursor.execute(sql, (id,))\n            if cursor.fetchall():\n                return \"pend\"\n            sql = \"UPDATE pedidos SET estado = 'ELIMINADO' WHERE id = %s\"\n            cursor.execute(sql, (id,))\n            if cursor.rowcount == 0:\n                res = None\n            else:\n                res = True\n            cursor.execute(\n                \"UPDATE articulo a INNER JOIN pedidos p ON a.id = p.id_articulo SET a.estado = 'LIBRE' WHERE p.id=%s\",\n                (id,))\n            self.db.commit()\n            cursor.close()\n            return res\n        except Exception as err:\n            self.db.rollback()\n            app.logger().exception(\"Error en DELETE deletePedido -- {}\".format(err))\n            return False\n\n    def getPedidoState(self, id_r):\n        # Busco el estado de todos los pedidos que tiene asignado el robot. 
Si alguno esta en terminal signfica que no se puede ir de la plataforma todavia\n try:\n cursor = self.createCursor()\n sql = \"SELECT estado FROM pedidos WHERE id_robot = %s AND estado = 'EN TERMINAL'\"\n cursor.execute(sql, (id_r,))\n res = cursor.fetchall()\n cursor.close()\n if res:\n return False, True\n else:\n return True, True\n except Exception as err:\n app.logger().exception(\"Error en SELECT getPedidoState -- {}\".format(err))\n return None, False\n\n # def deleteProduct(self, codart):\n # try:\n # cursor = self.createCursor()\n # # sql = \"DELETE FROM producto WHERE id = %s\"\n # sql = \"UPDATE producto SET \"\n # val = (codart,)\n # cursor.execute(sql, val)\n # self.db.commit()\n # cursor.close()\n # return True\n # except Exception as err:\n # app.logger().exception(\"Error en DELETE deleteProduct -- {}\".format(err))\n # return False\n\n def insertArticlesCsv(self, csv):\n try:\n cursor = self.createCursor()\n cont = 0\n for row in csv:\n cursor.execute(\"INSERT INTO articulo (id_torre, id_producto, estado) VALUES(%s, %s, UCASE(%s))\", row)\n cont = cont + 1\n self.db.commit()\n cursor.close()\n return (None, cont), True\n except Exception as err:\n app.logger().exception(\"Error en INSERT insertArticlesCsv -- {}\".format(err))\n self.db.rollback()\n return (row, cont), False\n\n def insertArticles(self, art):\n try:\n cursor = self.createCursor()\n sql = \"INSERT INTO articulo (id_torre, id_producto, estado) VALUES (%s, %s, UCASE(%s))\"\n val = (art[\"id_torre\"], art[\"id_producto\"], art[\"estado\"])\n cursor.execute(sql, val)\n self.db.commit()\n id_a = cursor.lastrowid\n cursor.close()\n return id_a, True\n except Exception as err:\n app.logger().exception(\"Error en INSERT insertArticles -- {}\".format(err))\n self.db.rollback()\n return None, False\n\n def getArticles(self, id=None, id_torre=None, id_producto=None, estado=None):\n try:\n cursor = self.createCursor()\n sql = \"SELECT * FROM articulo\"\n where = []\n params = {}\n if id is not None:\n where.append(\"id = %(id)s\")\n params['id'] = id\n if id_torre is not None:\n where.append(\"id_torre = %(id_torre)s\")\n params['id_torre'] = id_torre\n if id_producto is not None:\n where.append(\"id_producto = %(id_producto)s\")\n params['id_producto'] = id_producto\n if estado is not None:\n where.append(\"estado = %(estado)s\")\n params['estado'] = estado\n where.append(\"estado <> 'ELIMINADO'\")\n if where:\n sql = '{} WHERE {}'.format(sql, ' AND '.join(where))\n cursor.execute(sql, params)\n row_headers = [x[0] for x in cursor.description]\n res = cursor.fetchall()\n arts = []\n for result in res:\n arts.append(dict(zip(row_headers, result)))\n cursor.close()\n return arts, True\n except Exception as err:\n app.logger().exception(\"Error en SELECT getArticles -- {}\".format(err))\n return None, False\n\n def deleteArticle(self, id):\n try:\n cursor = self.createCursor()\n sql = \"SELECT * FROM pedidos WHERE id_articulo = %s AND estado != 'ENCOLADO'\"\n cursor.execute(sql, (id,))\n pedidos = cursor.fetchall()\n if pedidos:\n return \"pending\", [i[0] for i in pedidos]\n sql = \"UPDATE articulo SET estado = 'ELIMINADO' WHERE id = %s\"\n cursor.execute(sql, (id,))\n if cursor.rowcount == 0:\n res = None\n else:\n cursor.execute(\"UPDATE pedidos SET estado = 'ELIMINADO' WHERE id_articulo = %s\", (id,))\n res = True\n self.db.commit()\n cursor.close()\n return res, None\n except Exception as err:\n self.db.rollback()\n app.logger().exception(\"Error en DELETE deleteArticle -- {}\".format(err))\n return False, None\n\n 
# Busca el proximo pedido y lockea los recursos que se van a utilizar. Retorna a donde hay que ir\n def getpath(self, id_r):\n self.db.autocommit = True # SE CAMBIO A COMO ESTABA ANTES, VER QUE NO HAYA ROTO NADA\n cursor = self.createCursor()\n cursor.execute(\"SELECT * FROM pedidos WHERE estado='ENCOLADO'\") # Busco el primer pedido encolado\n pedido = cursor.fetchall()\n cont = 0 # Contador para iterar por los pedidos\n # Busco los datos de la torre donde esta el articulo del primer pedido\n while cont != len(pedido): # Mientras que haya resultados para iterar\n if self.platformAvailable(pedido[cont][3]): # Si la plataforma esta libre\n if pedido[cont][2] is None: # Si el codigo articulo es Null significa que es un pedido para buscar torre\n cursor.execute(\"SELECT t.id, t.loc1, t.loc2, t.estado FROM torre as t WHERE t.id = %s\", (pedido[cont][6],))\n else:\n cursor.execute(\n \"SELECT t.id, t.loc1, t.loc2, t.estado FROM torre as t INNER JOIN articulo a on t.id = a.id_torre WHERE a.id = %s\",\n (pedido[cont][2],))\n torre = cursor.fetchone()\n if torre[3] == \"LIBRE\": # Si la torre esta libre\n try:\n sql = \"UPDATE pedidos SET id_robot = %s, estado = 'ENPROGRESO' WHERE id = %s\"\n val = (id_r, pedido[cont][0])\n sql2 = \"UPDATE torre SET estado = 'OCUPADO' WHERE id = %s\"\n val2 = (torre[0],)\n sql3 = \"UPDATE robot SET estado = 'OCUPADO' WHERE id = %s\"\n val3 = (id_r,)\n sql4 = \"UPDATE plataforma_origen SET estado = 'OCUPADO' WHERE id = %s\"\n val4 = (pedido[cont][3],)\n cursor.execute(sql, val)\n cursor.execute(sql2, val2)\n cursor.execute(sql3, val3)\n cursor.execute(sql4, val4)\n if pedido[cont][2] is not None: # Si el pedido es de traer torre no tengo que hacer nada con los articulos\n sql5 = \"UPDATE articulo SET estado = 'OCUPADO' WHERE id = %s\"\n val5 = (pedido[cont][2],)\n cursor.execute(sql5, val5)\n # Marco los demas articulos que estan en la misma torre y misma orden de compra\n sql = \"SELECT a.id FROM articulo a INNER JOIN pedidos p on a.id = p.id_articulo WHERE p.id_orden_compra = %s AND a.id_torre = %s AND a.estado <> 'ENTREGADO' AND a.estado <> 'ELIMINADO'\"\n cursor.execute(sql, (pedido[cont][1], torre[0]))\n articulos = cursor.fetchall()\n sql = \"UPDATE articulo SET estado = 'OCUPADO' WHERE id = %s\"\n cursor.executemany(sql, articulos)\n sql = \"UPDATE pedidos SET estado = 'ENPROGRESO', id_robot = {} WHERE id_articulo = %s\".format(\n id_r)\n cursor.executemany(sql, articulos)\n self.db.commit()\n except Exception as err:\n app.logger().exception(\"Error en UPDATE getPath -- {}\".format(err))\n return None, False\n finally:\n cursor.close()\n return [torre[1], torre[2]], True\n cont = cont + 1\n self.db.autocommit = False\n return None, True\n\n # User modified matrix, check new towers to insert and old ones to delete\n def updatematrix(self, matrix, deposito, col_der, col_izq, fila_arr, fila_abj):\n cursor = self.createCursor()\n inserted = [] # New towers\n deleted = [] # Deleted towers\n pinserted = [] # New platforms\n pdeleted = [] # Deleted platfoms\n # Apply new dimensions to the deposito\n if fila_abj < 0:\n # Registrar que positiciones ya no estan de las filas eliminadas\n for fila in range(len(deposito) - (-fila_abj), len(deposito)):\n for columna in range(len(deposito[fila])):\n if deposito[fila][columna] == 1:\n deleted.append([fila, columna])\n if deposito[fila][columna] == 2:\n pdeleted.append([fila, columna])\n deposito = deposito[:fila_abj] # Eliminar filas abajo del deposito\n elif fila_abj > 0:\n new = [0 for i in deposito[0]] # Creo una fila 
de 0 de igual longitud que las demas\n for k in range(fila_abj): # Hago append de fila de 0 por la cantidad de nuevas filas\n deposito.append(new)\n if fila_arr < 0: # Elimino las filas de arriba\n for fila in range(0, (-fila_arr)): # agrego los elementos que ya no estan en la matriz a deleted\n for columna in range(len(deposito[fila])):\n if deposito[fila][columna] == 1:\n deleted.append([fila, columna])\n if deposito[fila][columna] == 2:\n pdeleted.append([fila, columna])\n deposito = deposito[-fila_arr:]\n for t in deleted:\n t[0] = t[0] + fila_arr\n for p in pdeleted:\n p[0] = p[0] + fila_arr\n cursor.execute(\"UPDATE torre SET loc1 = loc1 + %s\", (fila_arr,)) # actualizo posiciones en la base\n cursor.execute(\"UPDATE plataforma_origen SET loc1 = loc1 + %s\", (fila_arr,))\n elif fila_arr > 0:\n new = [0 for i in deposito[0]]\n for k in range(fila_arr): # Insert al principo las nuevas filas del deposito\n deposito.insert(0, new)\n cursor.execute(\"UPDATE torre SET loc1 = loc1 + %s\", (fila_arr, ))\n cursor.execute(\"UPDATE plataforma_origen SET loc1 = loc1 + %s\", (fila_arr,))\n if col_der < 0:\n for fila in range(len(deposito)):\n for columna in range(len(deposito[fila]) - (-col_der), len(deposito[fila])):\n if deposito[fila][columna] == 1:\n deleted.append([fila, columna])\n if deposito[fila][columna] == 2:\n pdeleted.append([fila, columna])\n for j in range(len(deposito)): # Saco la cantidad de elementos en cada fila\n deposito[j] = deposito[j][:col_der]\n elif col_der > 0:\n for k in range(col_der): # Agrego la cantidad de columnas nuevas en cada fila\n for j in deposito:\n j.append(0)\n if col_izq < 0:\n for fila in range(len(deposito)):\n for columna in range(0, (-col_izq)):\n if deposito[fila][columna] == 1:\n deleted.append([fila, columna])\n if deposito[fila][columna] == 2:\n pdeleted.append([fila, columna])\n for j in range(len(deposito)): # Saco los elementos del principio de cada fila\n deposito[j] = deposito[j][-col_izq:]\n cursor.execute(\"UPDATE torre SET loc2 = loc2 + %s WHERE estado <> 'ELIMINADO'\", (col_izq, ))\n cursor.execute(\"UPDATE plataforma_origen SET loc2 = loc2 + %s WHERE estado <> 'ELIMINADO'\", (col_izq, ))\n elif col_izq > 0:\n for k in range(col_izq): # Agrego elementos nuevos en cada fila\n for j in deposito:\n j.insert(0, 0)\n cursor.execute(\"UPDATE torre SET loc2 = loc2 + %s WHERE estado <> 'ELIMINADO'\", (col_izq, ))\n cursor.execute(\"UPDATE plataforma_origen SET loc2 = loc2 + %s WHERE estado <> 'ELIMINADO'\", (col_izq, ))\n # Actualizo las posiciones de lo que ya marque para borrar\n for t in deleted:\n #t[0] = t[0] + fila_arr\n t[1] = t[1] + col_izq\n for p in pdeleted:\n #p[0] = p[0] + fila_arr\n p[1] = p[1] + col_izq\n for fila in range(len(matrix)): # Iterate new matriz to check changes\n for columna in range(len(matrix[fila])):\n # Cuando el nuevo deposito es mas grande\n if matrix[fila][columna] == 1 and deposito[fila][columna] != 1: # New Tower\n inserted.append([fila, columna]) # Insert on inserted array\n if deposito[fila][columna] == 1 and matrix[fila][columna] != 1: # Tower deleted from original\n deleted.append([fila, columna])\n if matrix[fila][columna] == 2 and deposito[fila][columna] != 2: # New Platform\n pinserted.append([fila, columna]) # Insert on inserted array\n if deposito[fila][columna] == 2 and matrix[fila][columna] != 2: # Platform deleted from original\n pdeleted.append([fila, columna])\n try:\n if inserted: # If inserted has new positions\n for i in inserted:\n query = \"INSERT INTO torre (loc1, loc2, estado) VALUES (%s, 
%s, %s)\"\n val = (i[0], i[1], 'LIBRE')\n cursor.execute(query, val)\n if deleted:\n for i in deleted:\n query = \"SELECT id FROM torre WHERE loc1 = %s AND loc2 = %s AND estado <> 'ELIMINADO'\"\n cursor.execute(query, (i[0], i[1]))\n ok = self.deleteArticlebyTower(cursor, cursor.fetchone()[0])\n # Si hay algun pedido pendiente hago rollback, si hubo error hago rollback\n if not ok or ok == \"pending\":\n self.db.rollback()\n return ok\n query = \"UPDATE torre SET estado = 'ELIMINADO' WHERE loc1 = %s AND loc2 = %s AND estado <> 'ELIMINADO'\"\n cursor.execute(query, (i[0], i[1]))\n if pinserted: # If pinserted has new positions\n for i in pinserted:\n query = \"INSERT INTO plataforma_origen (loc1, loc2, estado) VALUES (%s, %s, %s)\"\n val = (i[0], i[1], 'LIBRE')\n cursor.execute(query, val)\n r = None\n if pdeleted:\n # Primero marco todas las platafomras como eliminadas\n for i in pdeleted:\n query = \"UPDATE plataforma_origen SET estado = 'ELIMINADO' WHERE loc1 = %s AND loc2 = %s AND estado = 'LIBRE'\"\n cursor.execute(query, (i[0], i[1]))\n # Si hay algun pedido en curso hago rollback, si hubo error hago rollback\n if cursor.rowcount == 0:\n self.db.rollback()\n return \"pending\"\n # Selecciono todas las plataformas disponibles\n cursor.execute(\"SELECT id FROM plataforma_origen WHERE estado != 'ELIMINADO'\")\n r = cursor.fetchall()\n for i in pdeleted:\n # Selecciono todos los pedidos que estaban encolados a esa plataforma\n query = \"SELECT p.id_orden_compra FROM pedidos p INNER JOIN plataforma_origen po on p.id_plataforma = po.id WHERE po.loc1 = %s AND po.loc2 = %s AND p.estado = 'ENCOLADO'\"\n cursor.execute(query, (i[0], i[1]))\n pedidos = cursor.fetchall()\n # Armo una lista circular para ir iterando por todas las plataformas\n plataformas = cycle(r)\n for j in pedidos:\n sql = \"UPDATE pedidos SET id_plataforma = %s WHERE id_orden_compra = %s\"\n cursor.execute(sql, (next(plataformas)[0], j[0]))\n self.db.commit()\n if r is None:\n cursor.execute(\"SELECT id from plataforma_origen WHERE estado != 'ELIMINADO'\")\n r = cursor.fetchall()\n with open('platforms.txt', 'w') as f:\n for i in r:\n f.write(f'{i[0]}\\n')\n except Exception as err:\n app.logger().exception(\"Error en UPDATE updateMatrix -- {}\".format(err))\n self.db.rollback()\n return False\n finally:\n cursor.close()\n return True\n # TODO todavia esta guardando la matriz nueva cuando hay pedidos encolados\n\n def deleteArticlebyTower(self, cursor, id_torre):\n try:\n # Busco todos los articulos que estan en esa torre\n sql = \"SELECT id FROM articulo WHERE id_torre = %s AND estado != 'ELIMINADO'\"\n cursor.execute(sql, (id_torre,))\n articulos = cursor.fetchall() # Lista de tuplas con solo el id\n # Por cada articulo si hay un pedido ejecutandose con ese articulo rollback all\n for articulo in articulos:\n sql = \"SELECT * FROM pedidos WHERE id_articulo = %s AND estado != 'ENCOLADO' AND estado != 'FINALIZADO' AND estado != 'ELIMINADO'\"\n cursor.execute(sql, (articulo[0],))\n pedidos = cursor.fetchall()\n if pedidos:\n return \"pending\"\n # Elimino logicamente el articulo\n sql = \"UPDATE articulo SET estado = 'ELIMINADO' WHERE id = %s AND estado <> 'ENTREGADO'\"\n cursor.execute(sql, (articulo[0],))\n # Elimino logicamente los pedidos asociados a ese articulo\n sql = \"UPDATE pedidos SET estado = 'ELIMINADO' WHERE id_articulo = %s AND estado <> 'FINALIZADO'\"\n cursor.execute(sql, (articulo[0],))\n return True\n except Exception as err:\n app.logger().exception(\"Error en DELETE deleteArticlebyTower -- 
{}\".format(err))\n return False\n\n def pedidosExecuting(self):\n try:\n sql = \"SELECT id FROM pedidos WHERE estado <> 'ENCOLADO' AND estado <> 'FINALIZADO' AND estado <> 'ELIMINADO'\"\n cursor = self.createCursor()\n cursor.execute(sql)\n res = cursor.fetchall()\n cursor.close()\n if not res:\n return False\n else:\n return True\n except Exception as err:\n app.logger().exception(\"Error en UPDATE updateMatrix -- {}\".format(err))\n return False\n", "sub_path": "app/services/db/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 41114, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "mysql.connector.connector.connect", "line_number": 17, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 17, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 17, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 27, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 27, "usage_type": "attribute"}, {"api_name": "app.logger", "line_number": 37, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 56, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 70, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 85, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 98, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 113, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 130, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 142, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 158, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 169, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 184, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 201, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 228, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 243, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 268, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 283, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 299, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 312, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 330, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 351, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 366, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 381, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 403, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 424, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 465, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 478, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 481, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 481, "usage_type": "name"}, {"api_name": "app.logger", "line_number": 494, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 506, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 506, "usage_type": "name"}, {"api_name": "app.logger", "line_number": 513, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 537, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 553, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 581, 
"usage_type": "call"}, {"api_name": "app.logger", "line_number": 596, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 630, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 653, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 702, "usage_type": "call"}, {"api_name": "itertools.cycle", "line_number": 841, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 853, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 882, "usage_type": "call"}, {"api_name": "app.logger", "line_number": 897, "usage_type": "call"}]} +{"seq_id": "414327222", "text": "# This function test verifys image preprocessing, imsub & imsub thresholding, contouring, camera, and pixelwise scannign\n\nfrom piGantry.imageProcess import cameraFunc as camera\nfrom piGantry.piSerial import mathFunc as mathFunc\nimport cv2\nimport time\nimport json\n\n\n\n# Set width & height to high value to set resolution as max \ncam = camera.cameraObj(10000, 10000, -3)\n\ndef main():\n \n while True:\n try:\n img = cam.grabFrame()\n if img is not None:\n cv2.imwrite(\"tests/testImg.png\", img)\n break\n except Exception as e:\n print(e)\n print(\"camera done\")\n # Release cam only when photo taking is done\n cam.cam.release()\n\n\n t3 = time.perf_counter()\n\n # Read images from data set\n exposureVal = 4\n frameVal = 4\n\n img2 = cv2.imread(\"imgData/comparisonExternalPSU/22.56v_{0}/opencv_frame_{1}.png\".format(exposureVal, frameVal))\n img3 = cv2.imread(\"imgData/comparisonExternalPSU/22.81v_{0}/opencv_frame_{1}.png\".format(exposureVal, frameVal))\n img4 = cv2.imread(\"imgData/comparisonExternalPSU/23.17v_{0}/opencv_frame_{1}.png\".format(5, frameVal))\n img5 = cv2.imread(\"imgData/comparisonExternalPSU/23.56v_{0}/opencv_frame_{1}.png\".format(5, frameVal))\n\n # print(\"compareImg far 1 & 2 {0}\\n\".format(camera.compareImg(img2,img3, 0.005)))\n\n # Image preprocessing\n t1 = time.perf_counter()\n procImg = camera.preProcImg(img2)\n procImg2 = camera.preProcImg(img3)\n procImg3 = camera.preProcImg(img4)\n procImg4 = camera.preProcImg(img5)\n t2 = time.perf_counter()\n\n print(t2)\n\n # Pixelwise operations\n def pixelScan(img):\n print(\"\\npixel start\\n\")\n t1 = time.perf_counter()\n # Calling on actual imageProcess module's pixelWiseScan function\n pixelWise=camera.pixelWiseScan(img, 1, 50)\n t2 = time.perf_counter()\n print(\"pixel end in {0} \\n pixel: {1} \\n dots: {2}\\n\".format((t2-t1),pixelWise,len(pixelWise)))\n\n # Contouring \n def contourScan(img, origImg, imgName, minArea, maxArea, exemptArea):\n print(\"\\ncontour1 start\\n\")\n t1 = time.perf_counter()\n # Calling on actual imageProcess module's contour function\n contours=camera.retContour(img, origImg, minArea, maxArea, exemptArea, \"{}Contour.png\".format(imgName))\n t2 = time.perf_counter()\n print(\"contour end in {0} \\n contour: {1} \\n dots: {2}\\n\".format((t2-t1),contours[0],len(contours[0])))\n # Plot best fit line\n mathFunc.bestFitPoly(contours[1][1], contours[1][0], 4, origImg)\n return contours[0]\n\n\n contourList = [contourScan(procImg, img2, \"22.56v_5\", 5, 50, 100), \n contourScan(procImg2, img3, \"22.81v_5\", 1, 50, 100),\n contourScan(procImg3, img4, \"23.17v_5\", 5, 50, 100), \n contourScan(procImg4, img5, \"23.56v_5\", 1, 50, 100)]\n\n for i in enumerate(contourList):\n with open(\"dataContour{0}.json\".format(i[0]), \"w\") as write_file:\n json.dump(i[1], write_file, indent=4)\n \nmain()", "sub_path": "tests/dotTest.py", "file_name": "dotTest.py", "file_ext": "py", 
"file_size_in_byte": 3055, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "piGantry.imageProcess.cameraFunc.cameraObj", "line_number": 12, "usage_type": "call"}, {"api_name": "piGantry.imageProcess.cameraFunc", "line_number": 12, "usage_type": "name"}, {"api_name": "cv2.imwrite", "line_number": 20, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 38, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 43, "usage_type": "call"}, {"api_name": "piGantry.imageProcess.cameraFunc.preProcImg", "line_number": 44, "usage_type": "call"}, {"api_name": "piGantry.imageProcess.cameraFunc", "line_number": 44, "usage_type": "name"}, {"api_name": "piGantry.imageProcess.cameraFunc.preProcImg", "line_number": 45, "usage_type": "call"}, {"api_name": "piGantry.imageProcess.cameraFunc", "line_number": 45, "usage_type": "name"}, {"api_name": "piGantry.imageProcess.cameraFunc.preProcImg", "line_number": 46, "usage_type": "call"}, {"api_name": "piGantry.imageProcess.cameraFunc", "line_number": 46, "usage_type": "name"}, {"api_name": "piGantry.imageProcess.cameraFunc.preProcImg", "line_number": 47, "usage_type": "call"}, {"api_name": "piGantry.imageProcess.cameraFunc", "line_number": 47, "usage_type": "name"}, {"api_name": "time.perf_counter", "line_number": 48, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 55, "usage_type": "call"}, {"api_name": "piGantry.imageProcess.cameraFunc.pixelWiseScan", "line_number": 57, "usage_type": "call"}, {"api_name": "piGantry.imageProcess.cameraFunc", "line_number": 57, "usage_type": "name"}, {"api_name": "time.perf_counter", "line_number": 58, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 64, "usage_type": "call"}, {"api_name": "piGantry.imageProcess.cameraFunc.retContour", "line_number": 66, "usage_type": "call"}, {"api_name": "piGantry.imageProcess.cameraFunc", "line_number": 66, "usage_type": "name"}, {"api_name": "time.perf_counter", "line_number": 67, "usage_type": "call"}, {"api_name": "piGantry.piSerial.mathFunc.bestFitPoly", "line_number": 70, "usage_type": "call"}, {"api_name": "piGantry.piSerial.mathFunc", "line_number": 70, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "610858436", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 14 10:02:45 2019\r\n\r\n@author: Matthew\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Oct 9 08:53:27 2019\r\n\r\n@author: Matthew\r\n\"\"\"\r\n\r\nfrom datetime import datetime\r\nimport matplotlib.pyplot as plt\r\nimport statistics\r\nimport csv\r\n\r\nfilename = r\"CSV Files\\activity2.csv\"\r\n\r\ndates, steps_data, missing_data = [], [], []\r\nmissing_data_count = 0\r\n\r\nwith open(filename) as file:\r\n \r\n thefile = csv.reader(file)\r\n headerRow = next(thefile)\r\n hold_steps = 0\r\n prev_date = \"\"\r\n \r\n for row in thefile:\r\n try:\r\n current_date = datetime.strptime(row[1], \"%Y-%m-%d\").date()\r\n steps_datum = eval(row[0])\r\n except:\r\n missing_data_count += 1\r\n missing_data.append(current_date)\r\n steps_datum = 0\r\n if current_date == prev_date:\r\n hold_steps += 0\r\n else:\r\n 
dates.append(current_date)\r\n steps_data.append(hold_steps)\r\n prev_date = current_date\r\n hold_steps = 0\r\n hold_steps += 0\r\n else:\r\n if current_date == prev_date:\r\n hold_steps += steps_datum\r\n else:\r\n dates.append(current_date)\r\n steps_data.append(hold_steps)\r\n prev_date = current_date\r\n hold_steps = 0\r\n hold_steps += steps_datum\r\n \r\n file.close()\r\n \r\n \r\n\r\nfig = plt.figure(dpi=100, figsize=(12, 7))\r\nplt.hist(x = steps_data, bins='auto', color='#0504aa', alpha=0.7)\r\nplt.title(\"Steps Taken in 2 Months\")\r\nplt.xlabel(\"Number of Steps Taken\")\r\nplt.ylabel(\"Frequency\")\r\n\r\nplt.savefig(r\"Figures\\Steps Each Day 2 (HISTOGRAM).png\")\r\n\r\nplt.show()\r\n\r\nprint(\"The number of missing data:\", missing_data_count)\r\n\r\nfor i in range(len(steps_data)-1):\r\n print(\"The total number of steps taken (Day %i):\" %(i+1), steps_data[i+1])\r\n \r\nprint(\"Average:\", statistics.mean(steps_data))\r\nprint(\"Median:\", statistics.median(steps_data))\r\n\r\n", "sub_path": "Exercise_12 - CSV Files/Matthew ES - Steps Each Day 2 (HISTOGRAM).py", "file_name": "Matthew ES - Steps Each Day 2 (HISTOGRAM).py", "file_ext": "py", "file_size_in_byte": 2058, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "csv.reader", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 34, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "statistics.mean", "line_number": 77, "usage_type": "call"}, {"api_name": "statistics.median", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "276008291", "text": "import io\nimport os.path\nimport re\nimport slackbot_settings\nfrom slacker import Slacker\nfrom requests import ReadTimeout\n\n\nslackapi = Slacker(slackbot_settings.API_TOKEN)\n\nusers = {}\n\ndef get_user(user_id):\n if user_id not in users:\n response = slackapi.users.info(user_id)\n users[user_id] = response.body[\"user\"]\n return users[user_id]\n\ndef get_channel(channel_id):\n response = slackapi.channels.info(channel_id)\n return response.body[\"channel\"]\n\ndef get_message(channel_id, ts):\n latest = str(float(ts) + 0.000001)\n oldest = str(float(ts) - 0.000001)\n response = slackapi.channels.history(channel=channel_id, latest=latest, oldest=oldest, count=1)\n if response.body[\"messages\"]:\n 
return response.body[\"messages\"][0]\n return None\n\ndef get_user_mame(user_id):\n emoji = re.compile(r\":.+?:\")\n user = get_user(user_id)\n return emoji.sub('', user[\"profile\"][\"real_name\"])\n\ndef get_channel_name(channel_id):\n channel = get_channel(channel_id)\n return channel[\"name\"]\n\ndef get_channel_tag(channel_id):\n return \"<#{0}|{1}>\".format(channel_id, get_channel_name(channel_id))\n\ndef post_image(message, pillow_image, title=None, comment=None, file_name=None):\n def filename_to_filetype(file_name):\n root, ext = os.path.splitext(file_name or 'sample.png')\n file_type = ext[1:] if ext else 'png'\n return file_type if file_type != 'jpg' else 'jpeg'\n\n output = io.BytesIO()\n pillow_image.save(output, filename_to_filetype(file_name), quality=100)\n data = {\n 'filename': file_name,\n 'title': title,\n 'initial_comment': comment,\n 'channels': message.body['channel']\n }\n files = {\n \"file\": output.getvalue()\n }\n try:\n slackapi.files.post('files.upload', data=data, files=files)\n except ReadTimeout as e:\n message.send(\"slackの調子が少し悪いみたいですね...\")\n\n\n", "sub_path": "plugins/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 1919, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "slacker.Slacker", "line_number": 9, "usage_type": "call"}, {"api_name": "slackbot_settings.API_TOKEN", "line_number": 9, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.path.splitext", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 45, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 49, "usage_type": "call"}, {"api_name": "requests.ReadTimeout", "line_number": 62, "usage_type": "name"}]} +{"seq_id": "251337190", "text": "import os\nimport pytest\nimport requests\nfrom dotenv import find_dotenv, load_dotenv\nfrom app import create_app\n\n\nclass MockResponse(object):\n def __init__(self, json):\n self.response_json = json\n\n def json(self):\n return self.response_json\n\n\n@pytest.fixture\ndef client():\n # Use our test integration config instead of the 'real' version\n file_path = find_dotenv('.env.test')\n\n # Record existing env variable values\n existing_values = {\n \"TRELLO_TOKEN\": os.environ.get(\"TRELLO_TOKEN\", \"\"),\n \"TRELLO_KEY\": os.environ.get(\"TRELLO_KEY\", \"\"),\n \"TRELLO_BOARD_NAME\": os.environ.get(\"TRELLO_BOARD_NAME\", \"\"),\n \"TRELLO_BOARD_ID\": os.environ.get(\"TRELLO_BOARD_ID\", \"\"),\n }\n\n load_dotenv(file_path, override=True)\n\n # Create the new app.\n test_app = create_app('test-todo-list-id', 'test-doing-list-id', 'test-done-list-id')\n\n # Use the app to create a test_client that can be used in our tests.\n test_app.testing = True\n with test_app.test_client() as client:\n yield client\n\n # Restore proper config\n file_path = find_dotenv('.env')\n if file_path:\n load_dotenv(file_path, override=True)\n else:\n for env_var_name in existing_values:\n os.environ[env_var_name] = existing_values[env_var_name]\n\n\ndef test_index_page(monkeypatch, client):\n\n # Arrange\n def mock_get(url, params):\n assert url == f'https://api.trello.com/1/boards/trello-board-id/cards'\n assert params['key'] == 'trello-key'\n assert params['token'] == 'trello-token'\n return MockResponse(\n [\n {\n \"name\": \"test-item-1\",\n \"id\": \"test-id-1\",\n \"desc\": 
\"test-description-1\",\n \"due\": \"2020-12-25T12:00:00.000000Z\",\n \"dateLastActivity\": \"2020-10-25T12:00:00.000000Z\",\n \"idList\": \"test-todo-list-id\"\n },\n {\n \"name\": \"test-item-2\",\n \"id\": \"test-id-2\",\n \"desc\": \"test-description-2\",\n \"due\": \"2020-12-26T12:00:00.000000Z\",\n \"dateLastActivity\": \"2020-10-26T12:00:00.000000Z\",\n \"idList\": \"test-doing-list-id\"\n }\n ])\n\n\n monkeypatch.setattr(requests, \"get\", mock_get)\n\n # Act\n response = client.get('/')\n\n # Assert\n assert response.status_code == 200\n\n decoded_response = response.data.decode('utf-8')\n\n assert \"test-item-1\" in decoded_response\n assert \"test-item-2\" in decoded_response\n", "sub_path": "tests/test_integration.py", "file_name": "test_integration.py", "file_ext": "py", "file_size_in_byte": 2615, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "dotenv.find_dotenv", "line_number": 19, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 23, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 24, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 25, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 26, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 26, "usage_type": "attribute"}, {"api_name": "dotenv.load_dotenv", "line_number": 29, "usage_type": "call"}, {"api_name": "app.create_app", "line_number": 32, "usage_type": "call"}, {"api_name": "dotenv.find_dotenv", "line_number": 40, "usage_type": "call"}, {"api_name": "dotenv.load_dotenv", "line_number": 42, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 16, "usage_type": "attribute"}]} +{"seq_id": "359430962", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 23 21:56:00 2017\n\n@author: ankur\n\"\"\"\n\nimport os\nimport csv\nimport cv2\nimport matplotlib\nmatplotlib.use('Qt4Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom sklearn.model_selection import train_test_split\n\nfrom data_generator import generator\n\n##utility function to plot data\n\ndef plot_image_data(train_batch_X, train_batch_y):\n gs = gridspec.GridSpec(len(train_batch_y)//3, 3, hspace = 0.5, wspace=0.3)\n plt.figure(figsize=(5, len(train_batch_y)*1.5//3))\n\n for i in range(len(train_batch_X)):\n ax = plt.subplot(gs[i])\n #ax.tick_params('off')\n ax.imshow(cv2.cvtColor(train_batch_X[i], cv2.COLOR_BGR2RGB))\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n title = train_batch_y[i]\n ax.set_title(title)\n plt.xticks([], [])\n plt.yticks([], [])\n plt.axis('off')\n plt.suptitle(\"Sample Images from generator\") \n plt.show()\n plt.savefig(\"generator.png\")\n \n \nsamples = []\ndata_dir = ['./'];\nfor training_dir in data_dir:\n if not os.path.isdir(training_dir):\n print(\"data directory doesn't exist\")\n\n csv_file = os.path.join(training_dir, 'driving_log.csv')\n if not os.path.isfile(csv_file):\n print(\"Could not find CSV file\")\n\n image_dir = os.path.join(training_dir, 'IMG')\n if not os.path.isdir(image_dir):\n print(\"Could not find image directory\")\n \n print(csv_file)\n with open(csv_file) as csvfile:\n 
reader = csv.reader(csvfile)\n for line in reader:\n samples.append(line)\n\ntrain_samples, validation_samples = train_test_split(samples, test_size=0.2, random_state=1200)\ntrain_samples = train_samples[5:6]\ntrain_generator = generator(train_samples, batch_size=1, drop_prob=0)\nplt.figure(figsize=(6, 2))\nprint(train_samples)\nplt.xticks([], [])\nplt.yticks([], [])\nplt.subplot(1,3,1)\nplt.imshow(cv2.cvtColor(cv2.imread(train_samples[0][1].strip()), cv2.COLOR_BGR2RGB))\ns = float(train_samples[0][3])+0.2;\nplt.title('Left: {}'.format(str(s)))\nplt.xticks([], [])\nplt.yticks([], [])\nplt.subplot(1,3,2)\nplt.imshow(cv2.cvtColor(cv2.imread(train_samples[0][0].strip()), cv2.COLOR_BGR2RGB))\ns = s-0.2;\nplt.title('Center: {}'.format(str(s)))\nplt.xticks([], [])\nplt.yticks([], [])\nplt.subplot(1,3,3)\nplt.imshow(cv2.cvtColor(cv2.imread(train_samples[0][2].strip()), cv2.COLOR_BGR2RGB))\ns = s-0.2;\nplt.title('Right: {}'.format(str(s)))\nplt.xticks([], [])\nplt.yticks([], [])\nplt.suptitle(\"Input to generator\") \nplt.show()\nplt.savefig(\"input.png\")\nvalidation_generator = generator(validation_samples, batch_size=1)\ntrain_batch_X, train_batch_y = next(train_generator)\nplot_image_data(train_batch_X, train_batch_y)", "sub_path": "generator_test.py", "file_name": "generator_test.py", "file_ext": "py", "file_size_in_byte": 2742, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "matplotlib.use", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.gridspec.GridSpec", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.gridspec", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 29, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.suptitle", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, 
{"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 58, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 62, "usage_type": "call"}, {"api_name": "data_generator.generator", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 70, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 70, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 70, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.title", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 76, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 76, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 76, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.title", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 82, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 82, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 
82, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.title", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.suptitle", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "data_generator.generator", "line_number": 90, "usage_type": "call"}]} +{"seq_id": "110230258", "text": "from datetime import datetime\nfrom kombu import Connection\n\n\nwith Connection('amqp://guest:guest@localhost:5672//') as connection:\n simple_queue = connection.SimpleQueue('SIMPLE_QUEUE')\n message = str.format(\n '[@] Hello, World! send at {}',\n datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n )\n simple_queue.put(message)\n print(str.format('[~] Send: {}', message))\n simple_queue.close()\n", "sub_path": "packages/message_broker_packages/rabbit_mq/kombu_package/userguide/examples/hello_publisher.py", "file_name": "hello_publisher.py", "file_ext": "py", "file_size_in_byte": 416, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "kombu.Connection", "line_number": 5, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "245309386", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 22 11:17:51 2017\n\n@author: louis\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom gen_data import load_2Dblobs\nfrom multilayer_nn import SimpleNeuralNetwork\nfrom sklearn.preprocessing import OneHotEncoder\nimport nn_utilities as utils\n\nnp.random.seed(1)\n\nplt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\nX_train, Y_train, X_test, Y_test = load_2Dblobs(\n 5000, n_center=5, ratio=0.5, one_hot=True)\n\nenc = OneHotEncoder(sparse=False)\n# training sample should be large enough to cover all encoding cases\nY_train_one_hot, idx_to_one_hot, one_hot_to_idx = utils.convert_to_one_hot(\n Y_train, enc)\nY_test_one_hot, _, _ = utils.convert_to_one_hot(Y_test, enc)\n\nlayers = []\nlayers.append((X_train.shape[1], \"linear\"))\nlayers.append((10, \"relu\"))\nlayers.append((10, \"relu\"))\nlayers.append((10, \"relu\"))\nlayers.append((Y_train_one_hot.shape[1], \"softmax\"))\n\nmodel = SimpleNeuralNetwork(layers, \"xavier\")\n\"\"\"\n# gradient check\ngrad_check_x = X_train[0, :].reshape(-1, 1)\ngrad_check_y = Y_train_one_hot[0, :].reshape(-1, 1)\nmodel.gradient_check(grad_check_x, grad_check_y, epsilon=1e-8, weight_decay=.0)\n\"\"\"\n\n# model training\nmodel.train(\n X_train.T,\n Y_train_one_hot.T,\n X_test.T,\n Y_test_one_hot.T,\n mini_batch_size=64,\n 
learning_rate=0.005,\n weight_decay=0.,\n keep_prob=0.5,\n num_epochs=100)\n\nplt.scatter(\n X_train[:, 0], X_train[:, 1], s=40, c=Y_train, cmap=plt.cm.Spectral)\n\nutils.plot_decision_boundary(lambda x, y: model.predict_class_output(x.T, y),\n X_train, Y_train, one_hot_to_idx)\n", "sub_path": "machine_learning/train_nn.py", "file_name": "train_nn.py", "file_ext": "py", "file_size_in_byte": 1734, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "numpy.random.seed", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 16, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 18, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 19, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 20, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "gen_data.load_2Dblobs", "line_number": 22, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.OneHotEncoder", "line_number": 25, "usage_type": "call"}, {"api_name": "nn_utilities.convert_to_one_hot", "line_number": 27, "usage_type": "call"}, {"api_name": "nn_utilities.convert_to_one_hot", "line_number": 29, "usage_type": "call"}, {"api_name": "multilayer_nn.SimpleNeuralNetwork", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 59, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "nn_utilities.plot_decision_boundary", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "141085615", "text": "from .models import User\nfrom django.conf import settings\n\ndef setting_processor(request):\n\treturn {\n\t\t'site_name':settings.SITE_NAME\n\t}\n\ndef user_processor(request):\n logged_user = {}\n if 'logged_user' in request.session:\n logged_user = request.session['logged_user']\n if 'id' in logged_user:\n user = User.objects.filter(id=logged_user['id'])\n if len(user) > 0:\n user = user[0]\n return {'logged_user' : {\n 'username':user.username,\n 'fullname':user.fullname,\n 'data':user.data,\n 'email':user.email,\n }}\n return {}\n", "sub_path": "simpleoj/context_processor.py", "file_name": "context_processor.py", "file_ext": "py", "file_size_in_byte": 641, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "django.conf.settings.SITE_NAME", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 6, "usage_type": "name"}, {"api_name": "models.User.objects.filter", "line_number": 14, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 14, "usage_type": "name"}]} +{"seq_id": "616109561", "text": "import torch\nimport torch.onnx\nimport os\nfrom unet import UNet\n\ncheckpoint_root = './checkpoint/'\ncuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if cuda else \"cpu\")\n\nunet = UNet()\nunet = unet.to(device)\ndummy_input = torch.randn(1, 1, 256, 256, 
device=device)\nfor file in os.listdir(checkpoint_root):\n if file.startswith(\"unet\") and file.endswith(\".tar\"):\n checkpoint = torch.load(checkpoint_root + file, map_location='cpu')\n unet.load_state_dict(checkpoint['state_dict'])\n\n\ntorch.onnx.export(unet, dummy_input, checkpoint_root + \"onnx_unet.onnx\")\n", "sub_path": "torch_to_onnx.py", "file_name": "torch_to_onnx.py", "file_ext": "py", "file_size_in_byte": 580, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "torch.cuda.is_available", "line_number": 7, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 7, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 8, "usage_type": "call"}, {"api_name": "unet.UNet", "line_number": 10, "usage_type": "call"}, {"api_name": "unet.to", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 12, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 15, "usage_type": "call"}, {"api_name": "unet.load_state_dict", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.onnx.export", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.onnx", "line_number": 19, "usage_type": "attribute"}]} +{"seq_id": "289631098", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport os\nimport re\nimport logging\nimport threading\nimport socket\nimport select\nimport time\nimport base64\n\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.DEBUG, format='[%(asctime)s] %(levelname)s %(message)s', handlers=[logging.StreamHandler()])\n\n\nBLACKLOG = 5\nCR = chr(13) \nLF = chr(10) \nCRLF = CR + LF\n\nHTTP_STATUS_CODE = {\n\n 200: 'HTTP/1.1 200 OK',\n 201: 'HTTP/1.1 201 Created', \n 202: 'HTTP/1.1 202 Accepted',\n 203: 'HTTP/1.1 203 Non-Authoritative Information (since HTTP/1.1)',\n 204: 'HTTP/1.1 204 No Content',\n 205: 'HTTP/1.1 205 Reset Content',\n 206: 'HTTP/1.1 206 Partial Content',\n 207: 'HTTP/1.1 207 Multi-Status(WebDAV; RFC 4918)',\n 208: 'HTTP/1.1 208 Already Reported (WebDAV; RFC 5842)',\n 226: 'HTTP/1.1 226 IM Used (RFC 3229)',\n 300: 'HTTP/1.1 300 Multiple Choices',\n 301: 'HTTP/1.1 301 Moved Permanently',\n 302: 'HTTP/1.1 302 Found',\n 303: 'HTTP/1.1 303 See Other',\n 304: 'HTTP/1.1 304 Not Modified',\n 400: 'HTTP/1.1 400 Bad Request',\n 401: 'HTTP/1.1 401 Unauthorized',\n 403: 'HTTP/1.1 403 Forbidden',\n 404: 'HTTP/1.1 404 Not Found',\n 505: 'HTTP/1.1 505 HTTP Version Not Supported'\n\n }\n\n\n_current_date = lambda: time.strftime(\"%a, %d %b %Y %H:%M:%S\", time.localtime())\n\n\ndef gen_headers(**kwargs):\n\n code = kwargs.get('code', 200)\n\n headers = HTTP_STATUS_CODE[code] + '\\n'\n headers += 'Date: %s\\n'\n headers += 'Content-Type: %s\\n'\n headers += 'Server: HTTPServer\\n'\n headers += 'Connection: close\\n'\n headers += '\\n'\n\n\n headers = headers % (_current_date(), kwargs.get('content-type', 'text/html'))\n\n return headers\n\nclass HTTPRequestParser(object):\n\n def __init__(self, request):\n\n self.error_code = (None, None)\n self.error = 0\n\n self.method = None\n self.uri = None\n self.ver = None\n self.close_connection = 1\n self._parse_request(request)\n\n def set_error(self, code, error):\n self.error_code = (code, error)\n self.error = 1\n\n def _parse_request(self, request):\n request = request.decode('ascii').split('\\n', 1)\n requestline = request[0].split()\n if len(requestline) == 3:\n method, uri, 
ver = requestline\n if ver[:4] != 'HTTP':\n self.set_error(400, \"Bad request version (%r)\" % version)\n return False\n try:\n version = ver.split('/', 1)[1]\n version_number = version.split('.')\n if len(version_number) != 2:\n raise ValueError\n version_number = int(version_number[0]), int(version_number[1])\n except (ValueError, IndexError) as e:\n self.set_error(400, \"Bad request version\")\n return False\n if version_number == (1, 1):\n self.close_connection = 0\n else:\n self.set_error(505, \"HTTP Version Not Supported. (%r)\" % version)\n return False\n elif len(requestline) == 2:\n method, uri = requestline\n if method != 'GET':\n self.set_error(400, \"Bad request method\")\n return False\n else:\n self.set_error(400, \"Bad request syntax\")\n return False\n self.method = method\n self.uri = uri\n self.ver = version\n \n if len(request) == 2:\n headers = dict(re.findall(r\"(?P.*?): (?P.*?)\\r\\n\", request[1]))\n self.headers = headers\n return True\n\n\n\n\nclass HTTPServer(object):\n\n def __init__(self, host='', port=8080):\n\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # TCP Socket\n self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.s.bind((host, port))\n self.s.listen(BLACKLOG)\n\n self.host = socket.gethostbyname(host)\n self.port = port\n\n self.routes = {}\n \n def route(self, *args, **kwargs):\n def decorator(func):\n for arg in args:\n self.routes[arg] = {'func': func, 'kwargs': dict(kwargs)}\n return func\n return decorator\n\n def serve(self, output=True):\n logger.propagate = output\n logger.info(\"Serving on %s port %s\" % (self.host, self.port))\n logger.info(\"Server is awaiting for connections..\")\n while 1:\n conn, addr = self.s.accept()\n logger.info(\"Connection from %s\" % ':'.join([str(element) for element in addr]))\n data = conn.recv(4096)\n request = HTTPRequestParser(data)\n if not request.error:\n connection_type = request.headers.get('Connection', 'close')\n response = ''\n\n if request.method == 'GET':\n match = None\n for pattern, dictionary in iter(self.routes.items()):\n match = re.match(pattern, request.uri)\n if match:\n headers = gen_headers(**dictionary['kwargs'])\n response += headers\n response += dictionary['func'](*match.groups())\n break\n if not match:\n response += gen_headers(code=404)\n response += \"404 Not Found
\n\nNot found\n\nThe requested URL %s was not found on this server.\n\n
\" % (request.uri)\n\n if response:\n conn.sendall(bytes(response, 'UTF-8'))\n\n if connection_type == 'close':\n conn.close()\n\n conn.close() # Close anyway\n self.s.close()\n\n\n\n\n\n \n\n\n\n\n\n", "sub_path": "HTTPServ/HTTPServer.py", "file_name": "HTTPServer.py", "file_ext": "py", "file_size_in_byte": 6179, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "logging.getLogger", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 16, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 16, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 16, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 50, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 50, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 121, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 132, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 132, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 132, "usage_type": "attribute"}, {"api_name": "socket.SOL_SOCKET", "line_number": 133, "usage_type": "attribute"}, {"api_name": "socket.SO_REUSEADDR", "line_number": 133, "usage_type": "attribute"}, {"api_name": "socket.gethostbyname", "line_number": 137, "usage_type": "call"}, {"api_name": "re.match", "line_number": 165, "usage_type": "call"}]} +{"seq_id": "195724259", "text": "#coding: utf-8\nimport math\nimport time\n#import numpy as np\nimport numpy.random as npr\nimport cupy as cp #GPUを使うためのnumpy\nimport chainer \nfrom chainer import cuda, Function, Variable, optimizers\nfrom chainer import Link, Chain\nimport chainer.functions as F\nimport chainer.links as L\n\nfrom NNFP import load_data \nfrom NNFP import result_plot \nfrom NNFP import normalize_array\nfrom NNFP import Deep_neural_network\nfrom NNFP import Finger_print\n\n\ntask_params = {'target_name' : 'measured log solubility in mols per litre',\n\t\t\t\t'data_file' : 'delaney.csv'}\n\nN_train = 70\nN_val = 1\nN_test = 10\n\n\nmodel_params = dict(fp_length = 50, \n\t\t\t\t\tfp_depth = 4, #NNの層と、FPの半径は同じ\n\t\t\t\t\tconv_width = 20, #必要なパラメータはこれだけ(?)\n\t\t\t\t\th1_size = 100, #最上位の中間層のサイズ\n\t\t\t\t\tL2_reg = cp.exp(-2))\n\ntrain_params = dict(num_iters = 100,\n\t\t\t\t\tbatch_size = 50,\n\t\t\t\t\tinit_scale = cp.exp(-4),\n\t\t\t\t\tstep_size = cp.exp(-6))\n\n\t\nclass Main(Chain):\n\tdef __init__(self, model_params):\n\t\tsuper(Main, self).__init__(\n\t\t\tfp = Finger_print.FP(model_params),\n\t\t\tdnn = Deep_neural_network.DNN(model_params),\n\t\t)\n\t\n\tdef __call__(self, x, y):\n\t\tt = time.time()\n\t\ty = Variable(cp.array(y, dtype=cp.float32))\n\t\tprint(\"variable : \", time.time() - t)\n\t\tpred = self.prediction(x)\n\t\treturn F.mean_squared_error(pred, y)\n\n\tdef prediction(self, x):\n\t\tx = Variable(cuda.to_cpu(x))\n\t\tfinger_print = self.fp(x)\n\t\tpred = self.dnn(finger_print)\n\t\treturn pred\n\n\tdef mse(self, x, y, undo_norm):\n\t\ty = Variable(cp.array(y, dtype=cp.float32))\n\t\tpred = undo_norm(self.prediction(x))\n\t\treturn F.mean_squared_error(pred, y)\n\t\ndef train_nn(model, train_smiles, train_raw_targets, seed=0,\n\t\t\t\tvalidation_smiles=None, validation_raw_targets=None):\n\n\tnum_print_examples = N_train\n\ttrain_targets, undo_norm = normalize_array(train_raw_targets)\n\ttraining_curve = []\n\toptimizer = 
optimizers.Adam()\n\toptimizer.setup(model)\n\toptimizer.add_hook(chainer.optimizer.WeightDecay(0.0001))\t\n\t\n\tnum_epoch = 100\n\tnum_data = len(train_smiles)\n\tbatch_size = 50\n\tx = train_smiles\n\ty = train_targets\n\tsff_idx = npr.permutation(num_data)\n\tTIME = time.time()\n\tfor epoch in range(num_epoch):\n\t\tepoch_time = time.time()\n\t\tfor idx in range(0,num_data, batch_size):\n\t\t\tbatched_x = x[sff_idx[idx:idx+batch_size\n\t\t\t\tif idx + batch_size < num_data else num_data]]\n\t\t\tbatched_y = y[sff_idx[idx:idx+batch_size\n\t\t\t\tif idx + batch_size < num_data else num_data]]\n\t\t\tupdate_time\t = time.time()\n\t\t\tmodel.zerograds()\n\t\t\tloss = model(batched_x, batched_y)\n\t\t\tloss.backward()\n\t\t\toptimizer.update()\n\t\t\tprint(\"UPDATE TIME : \", time.time() - update_time)\n\t\t#print \"epoch \", epoch, \"loss\", loss._data[0]\n\t\tif epoch % 10 == 0:\n\t\t\tprint_time = time.time()\n\t\t\ttrain_preds = model.mse(train_smiles, train_raw_targets, undo_norm)\n\t\t\tcur_loss = loss._data[0]\n\t\t\ttraining_curve.append(cur_loss)\n\t\t\tprint(\"PRINT TIME : \", time.time() - print_time)\n\t\t\tprint(\"Iteration\", epoch, \"loss\", math.sqrt(cur_loss), \\\n\t\t\t\t\"train RMSE\", math.sqrt((train_preds._data[0])))\n\t\t\tif validation_smiles is not None:\n\t\t\t\tvalidation_preds = model.mse(validation_smiles, validation_raw_targets, undo_norm)\n\t\t\t\tprint(\"Validation RMSE\", epoch, \":\", math.sqrt((validation_preds._data[0])))\n\t\tprint(\"1 EPOCH TIME : \", time.time() - epoch_time)\n\t\t#print loss\n\n\t\t\n\treturn model, training_curve, undo_norm\n\ndef main():\n\tprint(\"Loading data...\")\n\ttraindata, valdata, testdata = load_data(\n\t\ttask_params['data_file'], (N_train, N_val, N_test),\n\t\tinput_name = 'smiles', target_name = task_params['target_name'])\n\tx_trains, y_trains = traindata\n\tx_vals, y_vals = valdata\n\tx_tests, y_tests = testdata\n\tx_trains = cp.reshape(x_trains, (N_train, 1))\n\ty_trains = cp.reshape(y_trains, (N_train, 1)).astype(cp.float32)\n\tx_vals = cp.reshape(x_vals, (N_val, 1))\n\ty_vals = cp.reshape(y_vals, (N_val, 1)).astype(cp.float32)\n\tx_tests = cp.reshape(x_tests, (N_test, 1))\n\ty_tests = cp.reshape(y_tests, (N_test, 1)).astype(cp.float32)\n\n\tdef run_conv_experiment():\n\t\t'''Initialize model'''\n\t\tNNFP = Main(model_params) \n\t\toptimizer = optimizers.Adam()\n\t\toptimizer.setup(NNFP)\n\n\t\tgpu_device = 0\n\t\tcuda.get_device(gpu_device).use()\n\t\tNNFP.to_gpu(gpu_device)\n\t\t#xp = cuda.cupy\n\t\t'''Learn'''\n\t\ttrained_NNFP, conv_training_curve, undo_norm = \\\n\t\t\ttrain_nn(NNFP, \n\t\t\t\t\t x_trains, y_trains, \n\t\t\t\t\t validation_smiles=x_vals, \n\t\t\t\t\t validation_raw_targets=y_vals)\n\t\treturn math.sqrt(trained_NNFP.mse(x_tests, y_tests, undo_norm)._data[0]), conv_training_curve\n\n\tprint(\"Starting neural fingerprint experiment...\")\n\ttest_loss_neural, conv_training_curve = run_conv_experiment()\n\tprint() \n\tprint(\"Neural test RMSE\", test_loss_neural)\n\t#result_plot(conv_training_curve, train_params)\n\nif __name__ == '__main__':\n\tmain()\n", "sub_path": "Graduation-thesis/NNFP_chainer/regression_gpu/chainer_regression.py", "file_name": "chainer_regression.py", "file_ext": "py", "file_size_in_byte": 4742, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "cupy.exp", "line_number": 32, "usage_type": "call"}, {"api_name": "cupy.exp", "line_number": 36, "usage_type": "call"}, {"api_name": "cupy.exp", "line_number": 
37, "usage_type": "call"}, {"api_name": "chainer.Chain", "line_number": 40, "usage_type": "name"}, {"api_name": "NNFP.Finger_print.FP", "line_number": 43, "usage_type": "call"}, {"api_name": "NNFP.Finger_print", "line_number": 43, "usage_type": "name"}, {"api_name": "NNFP.Deep_neural_network.DNN", "line_number": 44, "usage_type": "call"}, {"api_name": "NNFP.Deep_neural_network", "line_number": 44, "usage_type": "name"}, {"api_name": "time.time", "line_number": 48, "usage_type": "call"}, {"api_name": "chainer.Variable", "line_number": 49, "usage_type": "call"}, {"api_name": "cupy.array", "line_number": 49, "usage_type": "call"}, {"api_name": "cupy.float32", "line_number": 49, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 50, "usage_type": "call"}, {"api_name": "chainer.functions.mean_squared_error", "line_number": 52, "usage_type": "call"}, {"api_name": "chainer.functions", "line_number": 52, "usage_type": "name"}, {"api_name": "chainer.Variable", "line_number": 55, "usage_type": "call"}, {"api_name": "chainer.cuda.to_cpu", "line_number": 55, "usage_type": "call"}, {"api_name": "chainer.cuda", "line_number": 55, "usage_type": "name"}, {"api_name": "chainer.Variable", "line_number": 61, "usage_type": "call"}, {"api_name": "cupy.array", "line_number": 61, "usage_type": "call"}, {"api_name": "cupy.float32", "line_number": 61, "usage_type": "attribute"}, {"api_name": "chainer.functions.mean_squared_error", "line_number": 63, "usage_type": "call"}, {"api_name": "chainer.functions", "line_number": 63, "usage_type": "name"}, {"api_name": "NNFP.normalize_array", "line_number": 69, "usage_type": "call"}, {"api_name": "chainer.optimizers.Adam", "line_number": 71, "usage_type": "call"}, {"api_name": "chainer.optimizers", "line_number": 71, "usage_type": "name"}, {"api_name": "chainer.optimizer.WeightDecay", "line_number": 73, "usage_type": "call"}, {"api_name": "chainer.optimizer", "line_number": 73, "usage_type": "attribute"}, {"api_name": "numpy.random.permutation", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 80, "usage_type": "name"}, {"api_name": "time.time", "line_number": 81, "usage_type": "call"}, {"api_name": "time.time", "line_number": 83, "usage_type": "call"}, {"api_name": "time.time", "line_number": 89, "usage_type": "call"}, {"api_name": "time.time", "line_number": 94, "usage_type": "call"}, {"api_name": "time.time", "line_number": 97, "usage_type": "call"}, {"api_name": "time.time", "line_number": 101, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 102, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 103, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 106, "usage_type": "call"}, {"api_name": "time.time", "line_number": 107, "usage_type": "call"}, {"api_name": "NNFP.load_data", "line_number": 115, "usage_type": "call"}, {"api_name": "cupy.reshape", "line_number": 121, "usage_type": "call"}, {"api_name": "cupy.reshape", "line_number": 122, "usage_type": "call"}, {"api_name": "cupy.float32", "line_number": 122, "usage_type": "attribute"}, {"api_name": "cupy.reshape", "line_number": 123, "usage_type": "call"}, {"api_name": "cupy.reshape", "line_number": 124, "usage_type": "call"}, {"api_name": "cupy.float32", "line_number": 124, "usage_type": "attribute"}, {"api_name": "cupy.reshape", "line_number": 125, "usage_type": "call"}, {"api_name": "cupy.reshape", "line_number": 126, "usage_type": "call"}, {"api_name": "cupy.float32", "line_number": 126, "usage_type": "attribute"}, 
{"api_name": "chainer.optimizers.Adam", "line_number": 131, "usage_type": "call"}, {"api_name": "chainer.optimizers", "line_number": 131, "usage_type": "name"}, {"api_name": "chainer.cuda.get_device", "line_number": 135, "usage_type": "call"}, {"api_name": "chainer.cuda", "line_number": 135, "usage_type": "name"}, {"api_name": "NNFP.to_gpu", "line_number": 136, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 144, "usage_type": "call"}]} +{"seq_id": "540318391", "text": "from django.shortcuts import render\nimport requests\nfrom .models import City\nfrom .forms import city_name\n\n\n\ndef index(request):\n url = 'http://api.openweathermap.org/data/2.5/forecast?q={}&APPID=4e3aadd76d19d3209156dccbff340001'\n\n form = city_name(request.POST)\n if request.method == 'POST':\n\n if form.is_valid():\n city = form.cleaned_data['name']\n else:\n city = \"Mumbai\" #Default City\n\n\n r = requests.get(url.format(city)).json()\n context = {\n 'city' : city,\n 'temperature' : r['list'][0]['main']['temp'],\n 'describe' : r['list'][0]['weather'][0]['description'] ,\n 'pressure' : r['list'][0]['main']['pressure'],\n 'sea_level' : r['list'][0]['main']['sea_level'],\n 'humidity' : r['list'][0]['main']['humidity'],\n 'form' : form,\n }\n\n return render(request,'weather/index.html',context = context)\n\n\ndef forms_view(request):\n form = forms.city_name()\n return render(request,'weather/forms.html',{'form' : form })\n", "sub_path": "the_weather/weather/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1038, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "forms.city_name", "line_number": 11, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 20, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 31, "usage_type": "call"}, {"api_name": "forms.city_name", "line_number": 35, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "152089468", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('usuario', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='usuario',\n name='avatar',\n field=models.ImageField(default=datetime.datetime(2015, 10, 15, 13, 31, 43, 387558, tzinfo=utc), upload_to=b'usuarios/avatar/'),\n preserve_default=False,\n ),\n ]\n", "sub_path": "server/usuario/migrations/0002_usuario_avatar.py", "file_name": "0002_usuario_avatar.py", "file_ext": "py", "file_size_in_byte": 570, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 9, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "call"}, {"api_name": "django.utils.timezone.utc", "line_number": 19, "usage_type": "name"}]} +{"seq_id": "480973488", "text": 
"from django.shortcuts import render, HttpResponse, redirect\nfrom time import gmtime, strftime\n\n\n# Create your views here.\n\n# '/' OR '/session_words'\ndef index(request):\n return render(request,'words/index.html')\n\n\n# '/session_words/add'\ndef add(request):\n if 'words' not in request.session:\n request.session['words'] = []\n\n if 'size' not in request.POST:\n size = 'small'\n else:\n size = request.POST['size']\n \n time = strftime(\"%I:%M:%S %p, %B %d %Y\", gmtime())\n\n wordslist = request.session['words']\n wordslist.append({\n 'word': request.POST['word'],\n 'color': request.POST['color'],\n 'size': size,\n 'time': time\n })\n request.session['words'] = wordslist\n return redirect('/session_words')\n\n\n# '/session_words/clear'\ndef clear(request):\n request.session['words'] = []\n return redirect('/session_words')", "sub_path": "apps/words/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 890, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "django.shortcuts.render", "line_number": 9, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 22, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 22, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 32, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "283483950", "text": "from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^post$', views.post, name='post'),\n url(r'^confirm_remove/(?P\\d+)$', views.confirm, name='confirm'),\n url(r'^remove/(?P\\d+)$', views.remove, name='remove'),\n url(r'^course/(?P\\d+)$', views.comment, name='comment'),\n url(r'^comment/(?P\\d+)$', views.postComment, name='postComment'),\n]\n", "sub_path": "apps/courses/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 443, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "django.conf.urls.url", "line_number": 5, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "310122589", "text": "##xmlファイルを読み込み、解析\n#\"\"\" xmlを読み込んで、新しいxmlに圧縮した値を作成\nimport xml.etree.ElementTree as ET\n\nFILE = 'pic_ (1).xml'\nfile = open(FILE)\ntree = ET.parse(file)\nroot = tree.getroot()\n\nall_list = []\n\n# 画像ファイル名を取得\nimg_name = root.find('filename').text\n\n# 画像ファイルのサイズ(幅・高さ)を取得\nimg_size = root.find('size')\nimg_w = int(img_size.find('width').text)\nimg_h = int(img_size.find('height').text)\n\nfor obj in root.iter('object'):\n cls = obj.find('name').text\n xmlbox = obj.find('bndbox')\n xmin = int(xmlbox.find('xmin').text)\n ymin = int(xmlbox.find('ymin').text)\n xmax = int(xmlbox.find('xmax').text)\n ymax = int(xmlbox.find('ymax').text)\n\n all_list.append([img_name] + [cls])\n\nprint(all_list)\nprint(\"画像サイズ width = {}, height = {}\".format(img_w,img_h))\n\nstring_ = '''\\\n\n {}\n \n original\n original\n XXX\n 0\n \n \n 0\n ?\n \n \n {}\n {}\n 3\n \n \n pudding\n Unspecified\n 1\n 0\n \n {}\n {}\n {}\n {}\n \n 
\n'''\n\nsize_changed = 500#画像変更サイズ\nx_ = size_changed / img_w#X軸方方向の圧縮倍率\ny_ = size_changed / img_h#X軸方方向の圧縮倍率\n\nre_img_w = round(img_w * x_)\nre_img_h = round(img_h * y_)\nre_xmin = round(xmin * x_)\nre_ymin = round(ymin * y_)\nre_xmax = round(xmax * x_)\nre_ymax = round(ymax * y_)\nprint(re_img_w,re_img_h, re_xmin, re_ymin, re_xmax, re_ymax)\n\nwith open('_9_pic_ (1)_pressed_value.xml', 'w') as f:\n f.write(string_.format(img_name,re_img_w, re_img_h, re_xmin,re_ymin,re_xmax,re_ymax))\n", "sub_path": "Python_Make_xml/_9_End_To_End_original_xml_maker.py", "file_name": "_9_End_To_End_original_xml_maker.py", "file_ext": "py", "file_size_in_byte": 2128, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "xml.etree.ElementTree.parse", "line_number": 7, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 7, "usage_type": "name"}]} +{"seq_id": "298593017", "text": "import pandas as pd\r\nimport numpy\r\nimport xlrd\r\n\r\ndef strip_punctuation(file):\r\n \r\n text = []\r\n punctuation_chars = [\"'\", '\"', \",\", \".\", \"!\", \":\", \";\", \"#\", \"@\", \"-\", \"(\", \")\", \"_\"]\r\n \r\n for list in file:\r\n for z in list:\r\n for y in punctuation_chars:\r\n if y in z:\r\n z = z.replace(y, '')\r\n text.append(z)\r\n\r\n tx = \" \".join(text)\r\n return tx.lower()\r\n\r\n\r\n\r\ndef get_pos(file):\r\n count = 0\r\n positive_word = []\r\n \r\n main_file = strip_punctuation(file)\r\n with open(r\"C:\\Users\\ankit\\OneDrive\\Desktop\\PYTHON\\PROJECT_SENTIMENT_ANALYSIS\\positivewords.txt\") as posi:\r\n for lin in posi:\r\n if lin[0] != ';' and lin[0] != '\\n':\r\n positive_word.append(lin.strip()) \r\n for i in main_file.split(\" \"):\r\n for j in positive_word:\r\n if (j==i):\r\n count+=1\r\n return count\r\n\r\n\r\n\r\n\r\ndef get_neg(file):\r\n count = 0\r\n negative_word = []\r\n main_file = strip_punctuation(file)\r\n \r\n with open(r\"C:\\Users\\ankit\\OneDrive\\Desktop\\PYTHON\\PROJECT_SENTIMENT_ANALYSIS\\negativewords.txt\") as negi:\r\n for lin in negi:\r\n if lin[0] != ';' and lin[0] != '\\n':\r\n negative_word.append(lin.strip())\r\n for i in main_file.split(\" \"): \r\n for j in negative_word: \r\n if j==i:\r\n count+=1\r\n return count\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef run(data):\r\n df = pd.DataFrame(columns=[\"Positive Score\", \"Negative Score\", \"Net Score\"])\r\n i = 1\r\n for sheet in data.sheets():\r\n list_of_lists=[]\r\n for row in range(sheet.nrows): \r\n for column in range (sheet.ncols):\r\n k = str(sheet.cell(row, column).value) \r\n line_list = k.split()\r\n list_of_lists.append(line_list)\r\n \r\n #Positive Score\r\n positive_score = get_pos(list_of_lists)\r\n \r\n #Negative Score\r\n negative_score = get_neg(list_of_lists)\r\n \r\n #Net Score\r\n net_score = positive_score - negative_score\r\n df.loc[i, ['Positive Score']] = positive_score\r\n df.loc[i, ['Negative Score']] = negative_score\r\n df.loc[i, ['Net Score']] = net_score \r\n i = i+1\r\n print(df)\r\n \r\nif __name__ == \"__main__\":\r\n \r\n data = xlrd.open_workbook(r\"C:\\Users\\ankit\\OneDrive\\Desktop\\PYTHON\\PROJECT_SENTIMENT_ANALYSIS\\sample_movie_data.xlsx\")\r\n run(data)\r\n ", "sub_path": "data_modification_new.py", "file_name": "data_modification_new.py", "file_ext": "py", "file_size_in_byte": 2520, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "pandas.DataFrame", "line_number": 62, "usage_type": "call"}, {"api_name": 
"xlrd.open_workbook", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "228419709", "text": "from django.contrib import admin\nfrom .models import ContactForm\n\n__author__ = \"Aniruddha Ravi\"\n__license__ = \"MIT\"\n__version__ = \"1.0.3\"\n__email__ = \"aniruddha.ravi@gmail.com\"\n__status__ = \"Development\"\n\n\nclass ContactFormAdmin(admin.ModelAdmin):\n class Meta:\n model = ContactForm\n\nadmin.site.register(ContactForm, ContactFormAdmin)\n", "sub_path": "mvp/contact/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 344, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "django.contrib.admin.ModelAdmin", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 11, "usage_type": "name"}, {"api_name": "models.ContactForm", "line_number": 13, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 15, "usage_type": "call"}, {"api_name": "models.ContactForm", "line_number": 15, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 15, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 15, "usage_type": "name"}]} +{"seq_id": "135635285", "text": "import configparser\nimport random\nimport string\nimport base64\n\nclass Salting():\n\n def read_salt_policy(self):\n # reads in password policy, returns variables as a list. Written by KW\n # password_policy = open('password_policy.txt', 'r')\n policy = configparser.ConfigParser()\n policy.read('password_policy.txt')\n salt_lowercase = policy.getint('Policy', 'salt_lowercase')\n salt_uppercase = policy.getint('Policy', 'salt_uppercase')\n salt_numbers = policy.getint('Policy', 'salt_numbers')\n\n return [int(salt_lowercase), int(salt_uppercase), int(salt_numbers)]\n\n\n def generate_salt(self):\n\n # reading through the password policy and looping through to extract necessary values to check and generates the password\n policy_checklist = self.read_salt_policy()\n max_length = sum(policy_checklist)\n\n # Generating the password using random and string modules\n salt = \"\"\n for character in range(max_length):\n\n random_character = random.randint(1,4)\n\n if random_character == 1:\n salt += random.choice(string.ascii_lowercase)\n elif random_character == 3:\n salt += random.choice(string.ascii_uppercase)\n else:\n salt += random.choice(string.digits)\n\n return salt\n\n def generate_base64_salt(self, salt):\n salt_64 = base64.b64encode(salt.encode())\n salt_final = salt_64.decode()\n #print(\"This is the final salt\" + salt_final)\n return salt_final\n", "sub_path": "Account-Generator/hashing/salting.py", "file_name": "salting.py", "file_ext": "py", "file_size_in_byte": 1559, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "configparser.ConfigParser", "line_number": 11, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 30, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 33, "usage_type": "call"}, {"api_name": "string.ascii_lowercase", "line_number": 33, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 35, "usage_type": "call"}, {"api_name": "string.ascii_uppercase", "line_number": 35, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 37, "usage_type": "call"}, {"api_name": "string.digits", "line_number": 37, "usage_type": "attribute"}, {"api_name": 
"base64.b64encode", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "478814717", "text": "# This file is part of the Trezor project.\n#\n# Copyright (C) 2012-2019 SatoshiLabs and contributors\n#\n# This library is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License version 3\n# as published by the Free Software Foundation.\n#\n# This library is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the License along with this library.\n# If not, see .\n\nimport pytest\n\nfrom trezorlib import btc, messages, tools\n\nfrom .. import bip32\nfrom ..common import MNEMONIC12\n\n\nclass TestMsgGetaddressShow:\n @pytest.mark.setup_client(mnemonic=MNEMONIC12)\n def test_show(self, client):\n assert (\n btc.get_address(client, \"Bitcoin\", [1], show_display=True)\n == \"1CK7SJdcb8z9HuvVft3D91HLpLC6KSsGb\"\n )\n assert (\n btc.get_address(client, \"Bitcoin\", [2], show_display=True)\n == \"15AeAhtNJNKyowK8qPHwgpXkhsokzLtUpG\"\n )\n assert (\n btc.get_address(client, \"Bitcoin\", [3], show_display=True)\n == \"1CmzyJp9w3NafXMSEFH4SLYUPAVCSUrrJ5\"\n )\n\n @pytest.mark.multisig\n @pytest.mark.setup_client(mnemonic=MNEMONIC12)\n def test_show_multisig_3(self, client):\n node = bip32.deserialize(\n \"xpub661MyMwAqRbcF1zGijBb2K6x9YiJPh58xpcCeLvTxMX6spkY3PcpJ4ABcCyWfskq5DDxM3e6Ez5ePCqG5bnPUXR4wL8TZWyoDaUdiWW7bKy\"\n )\n multisig = messages.MultisigRedeemScriptType(\n pubkeys=[\n messages.HDNodePathType(node=node, address_n=[1]),\n messages.HDNodePathType(node=node, address_n=[2]),\n messages.HDNodePathType(node=node, address_n=[3]),\n ],\n signatures=[b\"\", b\"\", b\"\"],\n m=2,\n )\n\n for i in [1, 2, 3]:\n assert (\n btc.get_address(\n client, \"Bitcoin\", [i], show_display=True, multisig=multisig\n )\n == \"3E7GDtuHqnqPmDgwH59pVC7AvySiSkbibz\"\n )\n\n @pytest.mark.skip_t1\n @pytest.mark.multisig\n def test_show_multisig_xpubs(self, client):\n nodes = [\n btc.get_public_node(\n client, tools.parse_path(f\"48h/0h/{i}h\"), coin_name=\"Bitcoin\"\n )\n for i in range(3)\n ]\n multisig = messages.MultisigRedeemScriptType(\n nodes=[n.node for n in nodes],\n signatures=[b\"\", b\"\", b\"\"],\n address_n=[0, 0],\n m=2,\n )\n\n xpubs = [[n.xpub[i * 16 : (i + 1) * 16] for i in range(5)] for n in nodes]\n\n for i in range(3):\n\n def input_flow():\n yield # show address\n assert client.debug.wait_layout().lines == [\n \"Multisig 2 of 3\",\n \"34yJV2b2GtbmxfZNw\",\n \"jPyuyUYkUbUnogqa8\",\n ]\n\n client.debug.press_no()\n yield # show QR code\n assert client.debug.wait_layout().text.startswith(\"Qr\")\n\n client.debug.press_no()\n yield # show XPUB#1\n lines = client.debug.wait_layout().lines\n assert lines[0] == \"XPUB #1 \" + (\"(yours)\" if i == 0 else \"(others)\")\n assert lines[1:] == xpubs[0]\n # just for UI test\n client.debug.swipe_up()\n\n client.debug.press_no()\n yield # show XPUB#2\n lines = client.debug.wait_layout().lines\n assert lines[0] == \"XPUB #2 \" + (\"(yours)\" if i == 1 else \"(others)\")\n assert lines[1:] == xpubs[1]\n # just for UI test\n client.debug.swipe_up()\n\n client.debug.press_no()\n yield # show XPUB#3\n lines = client.debug.wait_layout().lines\n assert lines[0] == \"XPUB #3 \" + (\"(yours)\" if i == 2 else \"(others)\")\n assert lines[1:] == xpubs[2]\n # just for UI test\n 
client.debug.swipe_up()\n\n client.debug.press_yes()\n\n with client:\n client.set_input_flow(input_flow)\n btc.get_address(\n client,\n \"Bitcoin\",\n tools.parse_path(f\"48h/0h/{i}h/0/0\"),\n show_display=True,\n multisig=multisig,\n script_type=messages.InputScriptType.SPENDMULTISIG,\n )\n\n @pytest.mark.multisig\n @pytest.mark.setup_client(mnemonic=MNEMONIC12)\n def test_show_multisig_15(self, client):\n node = bip32.deserialize(\n \"xpub661MyMwAqRbcF1zGijBb2K6x9YiJPh58xpcCeLvTxMX6spkY3PcpJ4ABcCyWfskq5DDxM3e6Ez5ePCqG5bnPUXR4wL8TZWyoDaUdiWW7bKy\"\n )\n\n pubs = []\n for x in range(15):\n pubs.append(messages.HDNodePathType(node=node, address_n=[x]))\n\n multisig = messages.MultisigRedeemScriptType(\n pubkeys=pubs, signatures=[b\"\"] * 15, m=15\n )\n\n for i in range(15):\n assert (\n btc.get_address(\n client, \"Bitcoin\", [i], show_display=True, multisig=multisig\n )\n == \"3QaKF8zobqcqY8aS6nxCD5ZYdiRfL3RCmU\"\n )\n", "sub_path": "tests/device_tests/test_msg_getaddress_show.py", "file_name": "test_msg_getaddress_show.py", "file_ext": "py", "file_size_in_byte": 5514, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "trezorlib.btc.get_address", "line_number": 29, "usage_type": "call"}, {"api_name": "trezorlib.btc", "line_number": 29, "usage_type": "name"}, {"api_name": "trezorlib.btc.get_address", "line_number": 33, "usage_type": "call"}, {"api_name": "trezorlib.btc", "line_number": 33, "usage_type": "name"}, {"api_name": "trezorlib.btc.get_address", "line_number": 37, "usage_type": "call"}, {"api_name": "trezorlib.btc", "line_number": 37, "usage_type": "name"}, {"api_name": "pytest.mark.setup_client", "line_number": 26, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 26, "usage_type": "attribute"}, {"api_name": "common.MNEMONIC12", "line_number": 26, "usage_type": "name"}, {"api_name": "trezorlib.messages.MultisigRedeemScriptType", "line_number": 47, "usage_type": "call"}, {"api_name": "trezorlib.messages", "line_number": 47, "usage_type": "name"}, {"api_name": "trezorlib.messages.HDNodePathType", "line_number": 49, "usage_type": "call"}, {"api_name": "trezorlib.messages", "line_number": 49, "usage_type": "name"}, {"api_name": "trezorlib.messages.HDNodePathType", "line_number": 50, "usage_type": "call"}, {"api_name": "trezorlib.messages", "line_number": 50, "usage_type": "name"}, {"api_name": "trezorlib.messages.HDNodePathType", "line_number": 51, "usage_type": "call"}, {"api_name": "trezorlib.messages", "line_number": 51, "usage_type": "name"}, {"api_name": "trezorlib.btc.get_address", "line_number": 59, "usage_type": "call"}, {"api_name": "trezorlib.btc", "line_number": 59, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pytest.mark.setup_client", "line_number": 42, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 42, "usage_type": "attribute"}, {"api_name": "common.MNEMONIC12", "line_number": 42, "usage_type": "name"}, {"api_name": "trezorlib.btc.get_public_node", "line_number": 69, "usage_type": "call"}, {"api_name": "trezorlib.btc", "line_number": 69, "usage_type": "name"}, {"api_name": "trezorlib.tools.parse_path", "line_number": 70, "usage_type": "call"}, {"api_name": "trezorlib.tools", "line_number": 70, "usage_type": "name"}, {"api_name": "trezorlib.messages.MultisigRedeemScriptType", "line_number": 74, "usage_type": "call"}, {"api_name": "trezorlib.messages", "line_number": 74, "usage_type": "name"}, 
{"api_name": "trezorlib.btc.get_address", "line_number": 125, "usage_type": "call"}, {"api_name": "trezorlib.btc", "line_number": 125, "usage_type": "name"}, {"api_name": "trezorlib.tools.parse_path", "line_number": 128, "usage_type": "call"}, {"api_name": "trezorlib.tools", "line_number": 128, "usage_type": "name"}, {"api_name": "trezorlib.messages.InputScriptType", "line_number": 131, "usage_type": "attribute"}, {"api_name": "trezorlib.messages", "line_number": 131, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 65, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 66, "usage_type": "attribute"}, {"api_name": "trezorlib.messages.HDNodePathType", "line_number": 143, "usage_type": "call"}, {"api_name": "trezorlib.messages", "line_number": 143, "usage_type": "name"}, {"api_name": "trezorlib.messages.MultisigRedeemScriptType", "line_number": 145, "usage_type": "call"}, {"api_name": "trezorlib.messages", "line_number": 145, "usage_type": "name"}, {"api_name": "trezorlib.btc.get_address", "line_number": 151, "usage_type": "call"}, {"api_name": "trezorlib.btc", "line_number": 151, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 134, "usage_type": "attribute"}, {"api_name": "pytest.mark.setup_client", "line_number": 135, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 135, "usage_type": "attribute"}, {"api_name": "common.MNEMONIC12", "line_number": 135, "usage_type": "name"}]} +{"seq_id": "94923814", "text": "# -*- coding: utf-8 -*-\n\nfrom django.db import transaction\nfrom django.db import connection\n\nfrom . import models\nimport reversion\n\n\nclass UserStoriesService(object):\n @transaction.atomic\n def bulk_insert(self, project, user, data, callback_on_success=None):\n items = filter(lambda s: len(s) > 0,\n map(lambda s: s.strip(), data.split(\"\\n\")))\n\n for item in items:\n obj = models.UserStory.objects.create(subject=item, project=project, owner=user,\n status=project.default_us_status)\n if callback_on_success:\n callback_on_success(obj, True)\n\n @transaction.atomic\n def bulk_update_order(self, project, user, data):\n cursor = connection.cursor()\n\n sql = \"\"\"\n prepare bulk_update_order as update userstories_userstory set \"order\" = $1\n where userstories_userstory.id = $2 and\n userstories_userstory.project_id = $3;\n \"\"\"\n\n cursor.execute(sql)\n for usid, usorder in data:\n cursor.execute(\"EXECUTE bulk_update_order (%s, %s, %s);\",\n (usorder, usid, project.id))\n cursor.close()\n", "sub_path": "taiga/projects/userstories/services.py", "file_name": "services.py", "file_ext": "py", "file_size_in_byte": 1214, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "django.db.transaction.atomic", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.db.transaction", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.connection.cursor", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.transaction.atomic", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.db.transaction", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "362868462", "text": "from django.urls import path\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom . 
import views\n\nurlpatterns = [\n # General links\n path('', views.index, name='homepage'),\n path('contact/', views.contact, name='contact'),\n path('index/', views.index, name='homepage'),\n\n # Course-based links\n path(\n 'course/create/',\n views.CoursesCreate.as_view(),\n name='course_create'),\n path(\n 'course//',\n views.course_detail,\n name='course_detail'\n ),\n path(\n 'course//update/',\n views.CoursesUpdate.as_view(),\n name='course_update'\n ),\n path(\n 'course//delete/',\n views.CoursesDelete.as_view(),\n name='course_delete'\n ),\n\n # JAKE - PATH for Project Detailview\n path(\n 'course//addproject/',\n views.ProjectCreate,\n name='ProjectCreate'\n ),\n\n # PREV:\n path(\n 'assignment//',\n views.assignment_detail,\n name='assignment_detail'\n ),\n path(\n 'assignment//update/',\n views.ProjectUpdate,\n name='ProjectUpdate'\n ),\n\n\n path(\n 'assignment//addsubmission/',\n views.model_form_upload,\n name='model_form_upload'\n ),\n path(\n '/viewsubmission/',\n views.submission_detail,\n name='submission_detail'\n ),\n path(\n 'deletesubmission/',\n views.SubmissionDelete.as_view(),\n name='submission_delete'\n ),\n path(\n 'downloadsubmission/',\n views.submission_download,\n name='submission_download'\n ),\n\n\n # Invite-based links\n path(\n 'course/invite/',\n views.create_invite,\n name='invite_create'\n ),\n path(\n 'invite//delete/',\n views.InviteDelete.as_view(),\n name='invite_delete'\n ),\n path(\n 'email/',\n views.email,\n name='email'\n ),\n\n #JAMES - Links for Help pages\n path(\n 'help/instructor/',\n views.instructor_help,\n name='instructor_help'\n ),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "sub_path": "autograder/personal/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 2315, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 34, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 41, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 46, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 53, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 58, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 63, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 68, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 76, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 81, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 86, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 93, "usage_type": "call"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 100, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 100, "usage_type": "name"}, {"api_name": "django.conf.urls.static.static", "line_number": 101, "usage_type": 
"call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 101, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 101, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 101, "usage_type": "attribute"}]} +{"seq_id": "423566631", "text": "from django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"contacts/\", views.contacts, name=\"contact\"),\n path(\"contacts/edit/\", views.edit, name=\"edit\"),\n path(\"contacts/delete/\", views.delete, name=\"delete\"),\n\n]", "sub_path": "contactBookApp/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 293, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "533562354", "text": "# -*- coding: utf-8 -*-\n'''\nJust a simple test routine for checking if the integration scheme works properly.\n\n'''\n\nimport unittest\nimport copy\nimport numpy as np\nimport scipy as sp\n\nimport amfe\n\n#%%\n\nclass DynamicalSystem():\n\n def __init__(self, K, M, f_ext):\n self.q = []\n self.t = []\n self.K_int = K\n self.M_int = M\n self.D_int = M*0\n self.f_ext = f_ext\n\n def S_and_res(self, q, dq, ddq, dt, t, beta, gamma):\n S = self.K_int + 1/(beta*dt**2)*self.M_int\n f_ext = self.f_ext(q, dq, t)\n res = self.M_int @ ddq + self.K_int @ q - f_ext\n return S, res, f_ext\n\n def K(self):\n return self.K_int\n\n def M(self):\n return self.M_int\n\n def D(self):\n return self.D_int\n\n def write_timestep(self, t, q):\n self.t.append(t)\n self.q.append(q)\n\n def clear_timesteps(self):\n pass\n\n\nclass IntegratorTest(unittest.TestCase):\n def setUp(self):\n c1 = 10\n c2 = 20\n c3 = 10\n c4 = 0\n K = np.array([[c1 + c2,-c2,0],\n [-c2 , c2 + c3, -c3],\n [0, -c3, c3 + c4]])\n\n M = np.diag([3,1,2])\n\n omega = 2*np.pi*1\n amplitude = 5\n def f_ext(q, dq, t):\n return np.array([0, 0., amplitude*np.cos(omega*t)])\n\n\n self.my_system = DynamicalSystem(K, M, f_ext)\n\n self.q_start = np.array([1, 0, 2.])*0\n self.dq_start = np.zeros_like(self.q_start)\n\n self.T = np.arange(0,5,0.05)\n\n def test_linear_vs_nonlinear_integrator(self):\n dt = 1E-3\n alpha = 0.1\n system1 = self.my_system\n system2 = copy.deepcopy(self.my_system)\n\n amfe.integrate_nonlinear_system(system1, self.q_start, self.dq_start,\n self.T, dt, alpha)\n\n amfe.integrate_linear_system(system2, self.q_start, self.dq_start,\n self.T, dt, alpha)\n\n q_nl = sp.array(system1.q)\n t_nl = sp.array(system1.t)\n q_lin = sp.array(system2.q)\n t_lin = sp.array(system2.t)\n np.testing.assert_allclose(t_nl, t_lin, atol=1E-10)\n # why does that work and below not?\n assert(np.any(np.abs(q_nl - q_lin) < 1E-3))\n # np.testing.assert_allclose(q_nl, q_lin, rtol=1E-1, atol=1E-4)\n return q_nl, q_lin, t_lin\n\nif __name__ == '__main__':\n my_integrator_test = IntegratorTest()\n my_integrator_test.setUp()\n q_nl, q_lin, t = my_integrator_test.test_linear_vs_nonlinear_integrator()\n from matplotlib import pyplot\n pyplot.plot(t, q_nl)\n pyplot.plot(t, q_lin)\n\n\n #%%", "sub_path": "tests/test_integrator.py", "file_name": "test_integrator.py", "file_ext": "py", "file_size_in_byte": 2627, "program_lang": 
"python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "unittest.TestCase", "line_number": 49, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 61, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 72, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 78, "usage_type": "call"}, {"api_name": "amfe.integrate_nonlinear_system", "line_number": 80, "usage_type": "call"}, {"api_name": "amfe.integrate_linear_system", "line_number": 83, "usage_type": "call"}, {"api_name": "scipy.array", "line_number": 86, "usage_type": "call"}, {"api_name": "scipy.array", "line_number": 87, "usage_type": "call"}, {"api_name": "scipy.array", "line_number": 88, "usage_type": "call"}, {"api_name": "scipy.array", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 90, "usage_type": "attribute"}, {"api_name": "numpy.any", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}]} +{"seq_id": "266795291", "text": "#!/usr/bin/python\n\n#This script takes a string and returns all permutations of it. \n#if you use perms.perms(\"apple\") you will get all permutations of Apple\n#in order to get all dictionary words, you will need to use perms.getRealWords(yourString)\n#Note that this is very processor intensive, so it is best to stick to 16 or fewer characters. 
\n#aeiousadfe\n\nimport itertools\n\n\ndef getPerms(somestring, length):\n    return itertools.permutations(somestring, length)\n    \ndef permsToList(allperms):\n    listOfPerms=set()\n    while True:\n        try:\n            tempstring=\"\"\n            nextHolder=allperms.next()\n            for charPOS in range(len(nextHolder)):\n                tempstring=tempstring+nextHolder[charPOS]\n            listOfPerms.add(tempstring)\n        except StopIteration:\n            break\n    return listOfPerms\n\ndef perms(aString):\n    allperms=[]\n    for length in range(len(aString)+1):\n        allperms.append(permsToList(getPerms(aString, length)))\n    return allperms\n    \ndef getWordList():\n    words=set()\n    wordFile=open('/home/jlmarks/words.txt','r')\n    for word in wordFile:\n        words.add(word[:-1])\n    wordFile.close()\n    return words\n    \ndef getRealWords(aString):\n    actualWords=set()\n    words=getWordList()\n    allPerms=perms(aString)\n    for wordLength in range(1, len(allPerms)):\n        actualWords=actualWords.union(words&allPerms[wordLength])\n    return actualWords\n\ndef example():\n    a=getRealWords(\"aeeiouysadtrghb\")\n", "sub_path": "scripts/python/anagrams/perms.py", "file_name": "perms.py", "file_ext": "py", "file_size_in_byte": 1480, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "itertools.permutations", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "78039832", "text": "#!/usr/bin/env python\n# framework for retrieving map related data through weewar API (www.weewar.com)\n# using minidom\n# written by Mike McConnell\n\nimport re\nimport sys\nimport urllib\nfrom xml.dom import minidom\n\n\nclass Map:\n\n    def __init__(self, id):\n        \"\"\"\n        Processes a map through API.\n        Takes one argument (map ID), and stores all map information provided\n        through API. Also generates counts on terrain types, starting units\n        (by player) and starting bases (by player).\n        \"\"\"\n        # make sure ID passed is integer\n        self.id = self._validate_int(id)\n        \n        # import api url\n        baseurl = 'http://weewar.com/api1/map/'\n        apiurl = baseurl + str(id)\n        try:\n            self.map_dom = minidom.parse(urllib.urlopen(apiurl))\n        except:\n            print(\"Error. 
Bad map link.\")\n\n # set map variables\n self.name = self._get_str_from_tag(self.map_dom, 'name')\n self.initialCredits = self._get_int_from_tag(self.map_dom, 'initialCredits')\n self.perBaseCredits = self._get_int_from_tag(self.map_dom, 'perBaseCredits')\n self.width = self._get_int_from_tag(self.map_dom, 'width')\n self.height = self._get_int_from_tag(self.map_dom, 'height')\n self.maxPlayers = self._get_int_from_tag(self.map_dom, 'maxPlayers')\n self.url = self._get_str_from_tag(self.map_dom, 'url')\n self.thumbnail = self._get_str_from_tag(self.map_dom, 'thumbnail')\n self.preview = self._get_str_from_tag(self.map_dom, 'preview')\n self.revision = self._get_int_from_tag(self.map_dom, 'revision')\n self.creator = self._get_str_from_tag(self.map_dom, 'creator')\n self.creatorProfile = self._get_str_from_tag(self.map_dom, 'creatorProfile')\n\n # regex patterns for grabbing terrain information\n typepattern = re.compile('type=(\"[a-zA-Z]+\")')\n unitpattern = re.compile('startUnit=(\"[a-zA-Z]+\")')\n ownerpattern = re.compile('startUnitOwner=(\"[0-9]\")')\n factionpattern = re.compile('startFaction=(\"[0-9]\")')\n \n # create dictionary to hold all terrain information\n self.terrain = {}\n \n # create list of dictionaries to hold unit and base information\n self.unit = [{} for player in range(self.maxPlayers)]\n self.base = [{} for player in range(self.maxPlayers)]\n\n # for each hex on map, record terrain, unit and base (if present)\n for hex in self.map_dom.getElementsByTagName('terrain'):\n \n # format terrain type information\n cur_hex = hex.toxml()\n type = re.search(typepattern, cur_hex).group(0)\n type = self._strip_tag(type, 'type')\n \n # count terrain type into dictionary\n if self.terrain.has_key(type):\n self.terrain[type] = self.terrain[type] + 1\n else:\n self.terrain[type] = 1\n\n # add starting unit information (if present)\n if re.search(unitpattern, cur_hex) and \\\n re.search(ownerpattern, cur_hex):\n \n # format start unit information\n startunit = re.search(unitpattern, cur_hex).group(0)\n startunit = self._strip_tag(startunit, 'startUnit')\n startunitowner = re.search(ownerpattern, cur_hex).group(0)\n startunitowner = self._strip_tag(startunitowner, 'startUnitOwner')\n\n # make sure unit extracted is an integer\n owner = self._validate_int(startunitowner)\n \n # count unit type into player's dictionary\n if self.unit[owner].has_key(startunit):\n self.unit[owner][startunit] = self.unit[owner][startunit] + 1\n else:\n self.unit[owner][startunit] = 1\n\n # add starting base information (if present)\n if re.search(typepattern, cur_hex) and \\\n re.search(factionpattern, cur_hex):\n\n # format start base information\n startbase = re.search(typepattern, cur_hex).group(0)\n startbase = self._strip_tag(startbase, 'type')\n startbaseowner = re.search(factionpattern, cur_hex).group(0)\n startbaseowner = self._strip_tag(startbaseowner, 'startFaction')\n\n # make sure base extracted is an integer\n owner = self._validate_int(startbaseowner)\n\n # count base type into player's dictionary\n if self.base[owner].has_key(startbase):\n self.base[owner][startbase] = self.base[owner][startbase] + 1\n else:\n self.base[owner][startbase] = 1\n\n # calculate total number of terrain (for percentages)\n self.total_terrain = 0\n for amount in self.terrain.values():\n self.total_terrain = self.total_terrain + amount\n\n def get_terrain_count(self, type):\n \"\"\"\n Takes one argument (terrain type).\n Returns number of terrain units on map. 
If type does not exist, returns\n        None.\n        \"\"\"\n        if self.terrain.has_key(type):\n            return self.terrain[type]\n        else:\n            return None\n\n    def get_terrain_percentage(self, type, format=False):\n        \"\"\"\n        Takes one argument (terrain type). Optional second argument will format\n        results to percentage (0.1882 -> '18.82%').\n        Returns percentage of terrain (compared to all terrain). If type does\n        not exist, returns None.\n        \"\"\"\n        if self.terrain.has_key(type):\n            if (format == False):\n                return (float(self.terrain[type]) / self.total_terrain)\n            else:\n                return \"{0:.2f}%\".format(float(self.terrain[type]) / \n                    self.total_terrain * 100)\n        else:\n            return None\n\n    def get_starting_unit(self, player):\n        \"\"\"\n        Takes one argument (player number; starts at 0).\n        Returns dictionary containing units player starts with.\n        \"\"\"\n        if self._validate_int(player) != None:\n            return self.unit[player]\n        \n    def get_starting_base(self, player):\n        \"\"\"\n        Takes one argument (player number; starts at 0).\n        Returns dictionary containing units player starts with.\n        \"\"\"\n        if self._validate_int(player) != None:\n            return self.base[player]\n\n    def _get_str_from_tag(self, dom, tag, count=0):\n        \"\"\"\n        Takes tag in given position and strips out brackets.\n        Does not handle non-ascii characters.\n        Returns as string.\n        \"\"\"\n        attr = dom.getElementsByTagName(tag)[count].toxml()\n        attr = attr.replace(('<' + tag + '>'),'').replace(('</' + tag + '>'),'')\n        try:\n            return str(attr)\n        except ValueError:\n            print(attr, \"could not be formatted as string\")\n\n    def _get_int_from_tag(self, dom, tag, count=0):\n        \"\"\"\n        Takes tag in given position and strips out brackets.\n        Returns as integer.\n        \"\"\"\n        attr = dom.getElementsByTagName(tag)[count].toxml()\n        attr = attr.replace(('<' + tag + '>'),'').replace(('</' + tag + '>'),'')\n        try:\n            return int(attr)\n        except ValueError:\n            print(attr, \"could not be formatted as integer\")\n        \n    def _strip_tag(self, tag, text):\n        \"\"\"\n        Removes quotation marks and tag information for legibility. 
'text' is\n xml variable name.\n Returns as string.\n \"\"\"\n return str(tag.replace('\"', \"\").replace(text + '=', \"\"))\n\n def _validate_int(self, value):\n \"\"\"\n Returns value as integer.\n \"\"\"\n try:\n value = int(value)\n except ValueError:\n print(value, \"is not an integer.\")\n \n return value\n", "sub_path": "weemap.py", "file_name": "weemap.py", "file_ext": "py", "file_size_in_byte": 7897, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "xml.dom.minidom.parse", "line_number": 28, "usage_type": "call"}, {"api_name": "xml.dom.minidom", "line_number": 28, "usage_type": "name"}, {"api_name": "urllib.urlopen", "line_number": 28, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 47, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 48, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 49, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 50, "usage_type": "call"}, {"api_name": "re.search", "line_number": 64, "usage_type": "call"}, {"api_name": "re.search", "line_number": 74, "usage_type": "call"}, {"api_name": "re.search", "line_number": 75, "usage_type": "call"}, {"api_name": "re.search", "line_number": 78, "usage_type": "call"}, {"api_name": "re.search", "line_number": 80, "usage_type": "call"}, {"api_name": "re.search", "line_number": 93, "usage_type": "call"}, {"api_name": "re.search", "line_number": 94, "usage_type": "call"}, {"api_name": "re.search", "line_number": 97, "usage_type": "call"}, {"api_name": "re.search", "line_number": 99, "usage_type": "call"}]} +{"seq_id": "646945333", "text": "import os\nimport logging\nfrom common.get_path import BASIC_PATH\nfrom common.get_config import config_data\n\n\ndef login_info(name, level, log_name, lf_level, ls_level):\n log = logging.getLogger(name)\n log.setLevel(level)\n\n formats = \"%(asctime)s - [%(filename)s-->line:%(lineno)d] - %(levelname)s: %(message)s\"\n log_format = logging.Formatter(formats)\n\n lf = logging.FileHandler(os.path.join(BASIC_PATH, log_name), encoding=\"utf-8\")\n lf.setLevel(lf_level)\n log.addHandler(lf)\n lf.setFormatter(log_format)\n\n ls = logging.StreamHandler()\n ls.setLevel(ls_level)\n log.addHandler(ls)\n ls.setFormatter(log_format)\n\n return log\n\n\nlog = login_info(name=config_data.get(\"login\", \"name\"),\n level=config_data.get(\"login\", \"level\"),\n log_name=config_data.get(\"login\", \"log_name\"),\n lf_level=config_data.get(\"login\", \"fh_level\"),\n ls_level=config_data.get(\"login\", \"sh_level\"))\n\n\n", "sub_path": "common/login_info.py", "file_name": "login_info.py", "file_ext": "py", "file_size_in_byte": 916, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 12, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "common.get_path.BASIC_PATH", "line_number": 14, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 19, "usage_type": "call"}, {"api_name": "common.get_config.config_data.get", "line_number": 27, "usage_type": "call"}, {"api_name": "common.get_config.config_data", "line_number": 27, "usage_type": "name"}, {"api_name": 
"common.get_config.config_data.get", "line_number": 28, "usage_type": "call"}, {"api_name": "common.get_config.config_data", "line_number": 28, "usage_type": "name"}, {"api_name": "common.get_config.config_data.get", "line_number": 29, "usage_type": "call"}, {"api_name": "common.get_config.config_data", "line_number": 29, "usage_type": "name"}, {"api_name": "common.get_config.config_data.get", "line_number": 30, "usage_type": "call"}, {"api_name": "common.get_config.config_data", "line_number": 30, "usage_type": "name"}, {"api_name": "common.get_config.config_data.get", "line_number": 31, "usage_type": "call"}, {"api_name": "common.get_config.config_data", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "309786625", "text": "from urllib.request import urlopen as http_req\nfrom urllib.request import Request\nfrom bs4 import BeautifulSoup as Soup\nfrom urllib import parse\nimport time\nimport sys\n\n\ndef get_soup(search_term, start):\n search_term = parse.quote_plus(search_term)\n url = f'https://www.google.dk/search?q={search_term}&start={start}'\n\n req = Request(url, data=None, headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'})\n\n u_client = http_req(req)\n page_html = u_client.read()\n u_client.close()\n page_soup = Soup(page_html, \"html.parser\")\n results = page_soup.find(\"body\")\n return results\n\n\ndef check_anchor(href, site, start):\n if href.startswith(site):\n if start == 0:\n start = 1\n else:\n start = 1 + int((start / 10))\n print(f\"Search term found on page: {start}\")\n sys.exit()\n\n\ndef start_scraping(search_term, start, site):\n soup = get_soup(search_term, start)\n\n results = soup.findAll(\"div\", {\"class\": 'g'})\n\n for result in results:\n anchor = None\n if result is not None:\n anchor = result.find(\"div\")\n if anchor is not None:\n anchor = anchor.find(\"div\", {\"class\": 'rc'})\n if anchor is not None:\n anchor = anchor.find(\"div\", {\"class\": 'r'})\n if anchor is not None:\n anchor = anchor.find(\"a\")\n if anchor is not None:\n if anchor['href'] is not None:\n check_anchor(anchor['href'], site, start)\n\n if len(results) == 0:\n sys.exit(\"Your search was not found\")\n\n if len(results) != 0:\n print(\"Sleeping 15 seconds\")\n time.sleep(15)\n print(f\"Start: {start}\")\n start_scraping(search_term, start + 10, site)\n", "sub_path": "scrape.py", "file_name": "scrape.py", "file_ext": "py", "file_size_in_byte": 1817, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "urllib.parse.quote_plus", "line_number": 10, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 10, "usage_type": "name"}, {"api_name": "urllib.request.Request", "line_number": 13, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 15, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 18, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 53, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "336648626", "text": "from data_loader import DataLoader\nfrom itertools import permutations\n\nclass FullSwitchSet:\n @staticmethod\n def call(set, h):\n total_time = sum(x[0] for x in set)\n due_date = int(h * total_time)\n minn = min([\n (FullSwitchSet.eval_result(perm, due_date), perm) for perm in permutations(set)\n ])\n return 
minn[0], list(minn[1])\n\n @staticmethod\n def eval_result(set, due_date):\n current_time = 0\n total_penelty = 0\n for [len, early, late] in set:\n current_time += len\n if current_time < due_date:\n total_penelty += early * (due_date - current_time)\n else:\n total_penelty += late * (current_time - due_date)\n return total_penelty\n", "sub_path": "full_switch_set.py", "file_name": "full_switch_set.py", "file_ext": "py", "file_size_in_byte": 690, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "itertools.permutations", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "458888247", "text": "import json\nimport datetime\nfrom ast import literal_eval\n\ndef utc_diff(zone):\n m = zone%100\n h = zone//100\n return h *3600 + m * 60\n\ndef date_format(date):\n arr = date.split()\n date = \" \".join(arr[1:-1])\n zone = int(arr[-1])\n date_obj = datetime.datetime.strptime(date, '%d %b %Y %H:%M:%S')\n return date_obj, zone\n\ndef seconds_between(date1, date2):\n arr = []\n date1 = date1.replace(\"_\",\" \")\n date2 = date2.replace(\"_\",\" \")\n date_obj1, zone1 = date_format(date1)\n date_obj2, zone2 = date_format(date2)\n date_diff = int((date_obj1 - date_obj2).total_seconds())\n zone_diff = utc_diff(zone1) - utc_diff(zone2)\n arr.append(str(abs(date_diff - zone_diff)))\n \n return (json.dumps(arr))\n\n", "sub_path": "Task B/Seconds_between.py", "file_name": "Seconds_between.py", "file_ext": "py", "file_size_in_byte": 735, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "datetime.datetime.strptime", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 14, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "63570652", "text": "# DB 연결\nimport pymysql\n\nconn = pymysql.connect(\n # private key\n)\n\ncurs = conn.cursor()\nprint(type(curs))\n\nsql = '''\nCREATE TABLE stock (\nstockcode varchar(255),\nstockname varchar(255),\ntime DATETIME,\nprice varchar(255),\nrate varchar(255))\n'''\ncurs.execute(sql)\nconn.commit()\nconn.close()\n", "sub_path": "Stock_Data/db_stock.py", "file_name": "db_stock.py", "file_ext": "py", "file_size_in_byte": 293, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "pymysql.connect", "line_number": 4, "usage_type": "call"}]} +{"seq_id": "303281341", "text": "from pprint import pprint\r\nimport boto3\r\nfrom boto3.dynamodb.conditions import Key, Attr\r\nimport argparse\r\nimport time\r\nfrom decimal import *\r\n\r\ndef scan_movies(YearToFind,GenreToFind):\r\n region=boto3.session.Session().region_name\r\n dynamodb = boto3.resource('dynamodb', region_name=region) #low-level Client\r\n table = dynamodb.Table('movies') #define which dynamodb table to access\r\n\r\n recordcount = 0\r\n recordscannedcount = 0\r\n\r\n scanreturn = table.scan( # perform first scan\r\n FilterExpression=Key('year').eq(YearToFind) & Attr(\"genre\").eq(GenreToFind)\r\n )\r\n recordcount += scanreturn['Count']\r\n recordscannedcount += scanreturn['ScannedCount']\r\n while 'LastEvaluatedKey' in scanreturn.keys(): # if lastevaluatedkey is present, we need to keep scanning and adding to our counts until everything is scanned\r\n scanreturn = table.scan(\r\n FilterExpression=Key('year').eq(YearToFind) & Attr(\"genre\").eq(GenreToFind),\r\n ExclusiveStartKey = 
scanreturn['LastEvaluatedKey']\r\n )\r\n recordcount += scanreturn['Count']\r\n recordscannedcount += scanreturn['ScannedCount']\r\n return [recordcount, recordscannedcount]\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"Qyear\", help=\"Search by year and genre.. will return number of movies with that year and genre\")\r\n parser.add_argument(\"Qgenre\", help=\"Search by year and genre.. will return number of movies with that year and genre\")\r\n args = parser.parse_args()\r\n queryyear = Decimal(args.Qyear)\r\n querygenre = (args.Qgenre) #section to collect argument from command line\r\n\r\n start = time.time()\r\n movies = scan_movies(queryyear, querygenre) #scan_movies returns our total counts as two items of a list\r\n end = time.time()\r\n print(\"Count is \", movies[0]) # print the count of items returned by the scan\r\n print(\"ScannedCount is \", movies[1]) # print the count of items that had to be scanned to process the scan\r\n print('Total time: {} sec'.format(end - start))\r\n", "sub_path": "lab_reference_scripts/MoviesScanYG.py", "file_name": "MoviesScanYG.py", "file_ext": "py", "file_size_in_byte": 2090, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "boto3.session.Session", "line_number": 9, "usage_type": "call"}, {"api_name": "boto3.session", "line_number": 9, "usage_type": "attribute"}, {"api_name": "boto3.resource", "line_number": 10, "usage_type": "call"}, {"api_name": "boto3.dynamodb.conditions.Key", "line_number": 17, "usage_type": "call"}, {"api_name": "boto3.dynamodb.conditions.Attr", "line_number": 17, "usage_type": "call"}, {"api_name": "boto3.dynamodb.conditions.Key", "line_number": 23, "usage_type": "call"}, {"api_name": "boto3.dynamodb.conditions.Attr", "line_number": 23, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 31, "usage_type": "call"}, {"api_name": "time.time", "line_number": 38, "usage_type": "call"}, {"api_name": "time.time", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "262659597", "text": "from django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^kmeans/', views.kmeans, name='kmeans'),\n url(r'^extraction/', views.extraction, name='extraction'),\n url(r'^reinit/', views.reinit, name='reinit'),\n\n]", "sub_path": "SpamDetector/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 281, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "169627085", "text": "#\n# MIT License\n#\n# Copyright (c) 2020 Airbyte\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\n\nimport json\nimport pathlib\nimport random\n\nimport requests\nfrom requests.auth import HTTPBasicAuth\n\n\ndef create():\n source_directory = pathlib.Path(__file__).resolve().parent.parent.parent\n configs_path = source_directory.joinpath(\"secrets/config.json\")\n with open(configs_path) as json_configs:\n configs = json.load(json_configs)\n auth = HTTPBasicAuth(configs.get(\"email\"), configs.get(\"api_token\"))\n base_api_url = f'https://{configs.get(\"domain\")}/rest/api/3/issue'\n\n headers = {\"Accept\": \"application/json\", \"Content-Type\": \"application/json\"}\n\n projects = [\"EX\", \"IT\", \"P2\", \"TESTKEY1\"]\n issue_types = [\"10001\", \"10002\", \"10004\"]\n\n for index in range(1, 76):\n payload = json.dumps(\n {\n \"fields\": {\n \"project\": {\"key\": random.choice(projects)},\n \"issuetype\": {\"id\": random.choice(issue_types)},\n \"summary\": f\"Test {index}\",\n \"description\": {\n \"type\": \"doc\",\n \"version\": 1,\n \"content\": [{\"type\": \"paragraph\", \"content\": [{\"type\": \"text\", \"text\": f\"Test description {index}\"}]}],\n },\n }\n }\n )\n\n requests.request(\"POST\", base_api_url, data=payload, headers=headers, auth=auth)\n\n\nif __name__ == \"__main__\":\n create()\n", "sub_path": "airbyte-integrations/connectors/source-jira/integration_tests/fixtures/create_issues.py", "file_name": "create_issues.py", "file_ext": "py", "file_size_in_byte": 2498, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "pathlib.Path", "line_number": 35, "usage_type": "call"}, {"api_name": 
"json.load", "line_number": 38, "usage_type": "call"}, {"api_name": "requests.auth.HTTPBasicAuth", "line_number": 39, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 48, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 51, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 52, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "471464325", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\n# python\nimport base64\nimport re\nimport struct\nimport time\nimport urllib.parse\nfrom datetime import datetime, timedelta\nfrom dateutil.parser import parse as dateutil_parse\n\n# django and drf\nfrom django.contrib.auth import get_user_model\nfrom django.utils import translation\nfrom django.core.exceptions import ValidationError\nfrom django.conf import settings\nfrom django.core.urlresolvers import NoReverseMatch\nfrom django.db.models import Q\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.translation import ugettext_lazy as _\nfrom rest_framework import (\n serializers, relations, viewsets, filters, generics, status\n)\nfrom rest_framework.settings import api_settings\nfrom rest_framework.reverse import reverse\nfrom rest_framework.response import Response\nfrom rest_framework.exceptions import ParseError\n\n\n# 3rd party\nfrom isodate import Duration, duration_isoformat, parse_duration\nfrom modeltranslation.translator import translator, NotRegistered\nfrom haystack.query import AutoQuery\nfrom munigeo.api import (\n GeoModelSerializer, GeoModelAPIView, build_bbox_filter, srid_to_srs\n)\nimport pytz\n\n# events\nfrom events import utils\nfrom events.custom_elasticsearch_search_backend import (\n CustomEsSearchQuerySet as SearchQuerySet\n)\nfrom events.models import (\n Place, Event, Keyword, Language, OpeningHoursSpecification, EventLink,\n Offer, DataSource, Organization \n)\nfrom events.translation import EventTranslationOptions\n\n\nSYSTEM_DATA_SOURCE_ID = 'system'\n\n\nserializers_by_model = {}\n\nall_views = []\ndef register_view(klass, name, base_name=None):\n entry = {'class': klass, 'name': name}\n if base_name is not None:\n entry['base_name'] = base_name\n all_views.append(entry)\n\n if klass.serializer_class and \\\n hasattr(klass.serializer_class, 'Meta') and \\\n hasattr(klass.serializer_class.Meta, 'model'):\n model = klass.serializer_class.Meta.model\n serializers_by_model[model] = klass.serializer_class\n\n\ndef urlquote_id(link):\n \"\"\"\n URL quote link's id part, e.g.\n http://127.0.0.1:8000/v0.1/place/tprek:20879/\n -->\n http://127.0.0.1:8000/v0.1/place/tprek%3A20879/\n This is DRF backwards compatibility function, 2.x quoted id automatically.\n\n :param link: URL str\n :return: quoted URL str\n \"\"\"\n if isinstance(link, str):\n parts = link.split('/')\n if len(parts) > 1 and ':' in parts[-2]:\n parts[-2] = urllib.parse.quote(parts[-2])\n link = '/'.join(parts)\n return link\n\n\ndef generate_id(namespace):\n t = time.time() * 1000\n postfix = base64.b32encode(struct.pack(\">Q\", int(t)).lstrip(b'\\x00'))\n postfix = postfix.strip(b'=').lower().decode(encoding='UTF-8')\n return '{}:{}'.format(namespace, postfix)\n\ndef parse_id_from_uri(uri):\n \"\"\"\n Parse id part from @id uri like\n 'http://127.0.0.1:8000/v0.1/event/matko%3A666/' -> 'matko:666'\n :param uri: str\n :return: str id\n \"\"\"\n if not uri.startswith('http'):\n return uri\n path = urllib.parse.urlparse(uri).path\n _id = 
path.rstrip('/').split('/')[-1]\n _id = urllib.parse.unquote(_id)\n return _id\n\ndef perform_id_magic_for(data):\n if 'id' in data:\n err = \"Do not send 'id' when POSTing a new Event (got id='{}')\"\n raise ParseError(err.format(data['id']))\n data['id'] = generate_id(data['data_source'])\n return data\n\n\nclass JSONLDRelatedField(relations.HyperlinkedRelatedField):\n \"\"\"\n Support of showing and saving of expanded JSON nesting or just a resource\n URL.\n Serializing is controlled by query string param 'expand', deserialization\n by format of JSON given.\n\n Default serializing is expand=true.\n \"\"\"\n\n invalid_json_error = _('Incorrect JSON. Expected JSON, received %s.')\n\n def __init__(self, *args, **kwargs):\n self.related_serializer = kwargs.pop('serializer', None)\n self.hide_ld_context = kwargs.pop('hide_ld_context', False)\n super(JSONLDRelatedField, self).__init__(*args, **kwargs)\n\n def use_pk_only_optimization(self):\n if self.is_expanded():\n return False\n else:\n return True\n\n def to_representation(self, obj):\n if isinstance(self.related_serializer, str):\n self.related_serializer = globals().get(self.related_serializer, None)\n if self.is_expanded():\n return self.related_serializer(obj, hide_ld_context=self.hide_ld_context,\n context=self.context).data\n link = super(JSONLDRelatedField, self).to_representation(obj)\n link = urlquote_id(link)\n return {\n '@id': link\n }\n\n def to_internal_value(self, value):\n if '@id' in value:\n return super(JSONLDRelatedField, self).to_internal_value(value['@id'])\n else:\n raise ValidationError(\n self.invalid_json_error % type(value).__name__)\n\n def is_expanded(self):\n return getattr(self, 'expanded', False)\n\n\nclass EnumChoiceField(serializers.Field):\n \"\"\"\n Database value of tinyint is converted to and from a string representation\n of choice field.\n\n TODO: Find if there's standardized way to render Schema.org enumeration\n instances in JSON-LD.\n \"\"\"\n\n def __init__(self, choices, prefix=''):\n self.choices = choices\n self.prefix = prefix\n super(EnumChoiceField, self).__init__()\n\n def to_representation(self, obj):\n if obj is None:\n return None\n return self.prefix + utils.get_value_from_tuple_list(self.choices,\n obj, 1)\n\n def to_internal_value(self, data):\n return utils.get_value_from_tuple_list(self.choices,\n self.prefix + str(data), 0)\n\n\nclass ISO8601DurationField(serializers.Field):\n\n def to_representation(self, obj):\n if obj:\n d = Duration(milliseconds=obj)\n return duration_isoformat(d)\n else:\n return None\n\n def to_internal_value(self, data):\n if data:\n value = parse_duration(data)\n return (\n value.days * 24 * 3600 * 1000000\n + value.seconds * 1000\n + value.microseconds / 1000\n )\n else:\n return 0\n\n\nclass MPTTModelSerializer(serializers.ModelSerializer):\n def __init__(self, *args, **kwargs):\n super(MPTTModelSerializer, self).__init__(*args, **kwargs)\n for field_name in 'lft', 'rght', 'tree_id', 'level':\n if field_name in self.fields:\n del self.fields[field_name]\n\n\nclass TranslatedModelSerializer(serializers.ModelSerializer):\n def __init__(self, *args, **kwargs):\n super(TranslatedModelSerializer, self).__init__(*args, **kwargs)\n model = self.Meta.model\n try:\n trans_opts = translator.get_options_for_model(model)\n except NotRegistered:\n self.translated_fields = []\n return\n\n self.translated_fields = trans_opts.fields.keys()\n lang_codes = [x[0] for x in settings.LANGUAGES]\n # Remove the pre-existing data in the bundle.\n for field_name in 
self.translated_fields:\n for lang in lang_codes:\n key = \"%s_%s\" % (field_name, lang)\n if key in self.fields:\n del self.fields[key]\n del self.fields[field_name]\n\n # def get_field(self, model_field):\n # kwargs = {}\n # if issubclass(\n # model_field.__class__,\n # (django_db_models.CharField,\n # django_db_models.TextField)):\n # if model_field.null:\n # kwargs['allow_none'] = True\n # kwargs['max_length'] = getattr(model_field, 'max_length')\n # return fields.CharField(**kwargs)\n # return super(TranslatedModelSerializer, self).get_field(model_field)\n\n def to_representation(self, obj):\n ret = super(TranslatedModelSerializer, self).to_representation(obj)\n if obj is None:\n return ret\n return self.translated_fields_to_representation(obj, ret)\n\n def to_internal_value(self, data):\n \"\"\"\n Convert complex translated json objects to flat format.\n E.g. json structure containing `name` key like this:\n {\n \"name\": {\n \"fi\": \"musiikkiklubit\",\n \"sv\": \"musikklubbar\",\n \"en\": \"music clubs\"\n },\n ...\n }\n Transforms this:\n {\n \"name\": \"musiikkiklubit\",\n \"name_fi\": \"musiikkiklubit\",\n \"name_sv\": \"musikklubbar\",\n \"name_en\": \"music clubs\"\n ...\n }\n :param data:\n :return:\n \"\"\"\n lang = settings.LANGUAGES[0][0]\n for field_name in self.translated_fields:\n # FIXME: handle default lang like others!?\n lang = settings.LANGUAGES[0][0] # Handle default lang\n if data.get(field_name, None) is None:\n continue\n values = data[field_name].copy() # Save original values\n\n key = \"%s_%s\" % (field_name, lang)\n val = data[field_name].get(lang)\n if val:\n values[key] = val # field_name_LANG\n values[field_name] = val # field_name\n if lang in values:\n del values[lang] # Remove original key LANG\n for lang in [x[0] for x in settings.LANGUAGES[1:]]:\n key = \"%s_%s\" % (field_name, lang)\n val = data[field_name].get(lang)\n if val:\n values[key] = val # field_name_LANG\n values[field_name] = val # field_name\n if lang in values:\n del values[lang] # Remove original key LANG\n data.update(values)\n del data[field_name] # Remove original field_name from data\n\n # do remember to call the super class method as well!\n data.update(super().to_internal_value(data))\n\n return data\n\n def translated_fields_to_representation(self, obj, ret):\n for field_name in self.translated_fields:\n d = {}\n default_lang = settings.LANGUAGES[0][0]\n d[default_lang] = getattr(obj, field_name)\n for lang in [x[0] for x in settings.LANGUAGES[1:]]:\n key = \"%s_%s\" % (field_name, lang) \n val = getattr(obj, key, None)\n if val == None:\n continue \n d[lang] = val\n\n # If no text provided, leave the field as null\n for key, val in d.items():\n if val != None:\n break\n else:\n d = None\n ret[field_name] = d\n\n return ret\n\n\nclass LinkedEventsSerializer(TranslatedModelSerializer, MPTTModelSerializer):\n \"\"\"Serializer with the support for JSON-LD/Schema.org.\n\n JSON-LD/Schema.org syntax::\n\n {\n \"@context\": \"http://schema.org\",\n \"@type\": \"Event\",\n \"name\": \"Event name\",\n ...\n }\n\n See full example at: http://schema.org/Event\n\n Args:\n hide_ld_context (bool):\n Hides `@context` from JSON, can be used in nested\n serializers\n \"\"\"\n\n def __init__(self, instance=None, files=None,\n context=None, partial=False, many=None,\n allow_add_remove=False, hide_ld_context=False, **kwargs):\n super(LinkedEventsSerializer, self).__init__(\n instance=instance, context=context, **kwargs)\n if 'created_by' in self.fields:\n del self.fields['created_by']\n if 
'modified_by' in self.fields:\n del self.fields['modified_by']\n\n if context is not None:\n include_fields = context.get('include', [])\n for field_name in include_fields:\n if not field_name in self.fields:\n continue\n field = self.fields[field_name]\n if isinstance(field, relations.ManyRelatedField):\n field = field.child_relation\n if not isinstance(field, JSONLDRelatedField):\n continue\n field.expanded = True\n\n self.hide_ld_context = hide_ld_context\n\n self.disable_camelcase = True\n if self.context and 'request' in self.context:\n request = self.context['request']\n if 'disable_camelcase' in request.QUERY_PARAMS:\n self.disable_camelcase = True\n\n def to_representation(self, obj):\n \"\"\"\n Before sending to renderer there's a need to do additional work on\n to-be-JSON dictionary data:\n 1. Add @context, @type and @id fields\n 2. Convert field names to camelCase\n Renderer is the right place for this but now loop is done just once.\n Reversal conversion is done in parser.\n \"\"\"\n ret = super(LinkedEventsSerializer, self).to_representation(obj)\n if 'id' in ret and 'request' in self.context:\n try:\n ret['@id'] = reverse(self.view_name,\n kwargs={u'pk': ret['id']},\n request=self.context['request'])\n except NoReverseMatch:\n ret['@id'] = str(ret['id'])\n ret['@id'] = urlquote_id(ret['@id'])\n\n # Context is hidden if:\n # 1) hide_ld_context is set to True\n # 2) self.object is None, e.g. we are in the list of stuff\n if not self.hide_ld_context and self.instance is not None:\n if hasattr(obj, 'jsonld_context') \\\n and isinstance(obj.jsonld_context, (dict, list)):\n ret['@context'] = obj.jsonld_context\n else:\n ret['@context'] = 'http://schema.org'\n\n # Use jsonld_type attribute if present,\n # if not fallback to automatic resolution by model name.\n # Note: Plan 'type' could be aliased to @type in context definition to\n # conform JSON-LD spec.\n if hasattr(obj, 'jsonld_type'):\n ret['@type'] = obj.jsonld_type\n else:\n ret['@type'] = obj.__class__.__name__\n\n return ret\n\n\ndef _clean_qp(query_params):\n \"\"\"\n Strip 'event.' prefix from all query params.\n :rtype : QueryDict\n :param query_params: dict self.request.QUERY_PARAMS\n :return: QueryDict QUERY_PARAMS\n \"\"\"\n query_params = query_params.copy() # do not alter original dict\n nspace = 'event.'\n for key in query_params.keys():\n if key.startswith(nspace):\n new_key = key[len(nspace):]\n # .pop() returns a list(?), don't use\n # query_params[new_key] = query_params.pop(key)\n query_params[new_key] = query_params[key]\n del query_params[key]\n return query_params\n\n\nclass KeywordSerializer(LinkedEventsSerializer):\n view_name = 'keyword-detail'\n\n class Meta:\n model = Keyword\n\n\nclass KeywordViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = Keyword.objects.all()\n serializer_class = KeywordSerializer\n\n def get_queryset(self):\n \"\"\"\n Return Keyword queryset. 
If request has parameter show_all_keywords=1\n        all Keywords are returned, otherwise only those which have events.\n        Additional query parameters:\n        event.data_source\n        event.start\n        event.end\n        \"\"\"\n        queryset = Keyword.objects.all()\n        if self.request.QUERY_PARAMS.get('show_all_keywords'):\n            # Limit by data_source anyway, if it is set\n            data_source = self.request.QUERY_PARAMS.get('data_source')\n            if data_source:\n                data_source = data_source.lower()\n                queryset = queryset.filter(data_source=data_source)\n        else:\n            events = Event.objects.all()\n            params = _clean_qp(self.request.QUERY_PARAMS)\n            events = _filter_event_queryset(events, params)\n            keyword_ids = events.values_list('keywords',\n                                             flat=True).distinct().order_by()\n            queryset = queryset.filter(id__in=keyword_ids)\n        # Optionally filter keywords by filter parameter,\n        # can be used e.g. with typeahead.js\n        val = self.request.QUERY_PARAMS.get('filter')\n        if val:\n            queryset = queryset.filter(name__startswith=val)\n        return queryset\n\nregister_view(KeywordViewSet, 'keyword')\n\n\nclass PlaceSerializer(LinkedEventsSerializer, GeoModelSerializer):\n    view_name = 'place-detail'\n\n    class Meta:\n        model = Place\n\n\nclass PlaceViewSet(GeoModelAPIView, viewsets.ReadOnlyModelViewSet):\n    queryset = Place.objects.all()\n    serializer_class = PlaceSerializer\n\n    def get_queryset(self):\n        \"\"\"\n        Return Place queryset. If request has parameter show_all_places=1\n        all Places are returned, otherwise only those which have events.\n        Additional query parameters:\n        event.data_source\n        event.start\n        event.end\n        \"\"\"\n        queryset = Place.objects.all()\n        if self.request.QUERY_PARAMS.get('show_all_places'):\n            pass\n        else:\n            events = Event.objects.all()\n            params = _clean_qp(self.request.QUERY_PARAMS)\n            events = _filter_event_queryset(events, params)\n            location_ids = events.values_list('location_id',\n                                              flat=True).distinct().order_by()\n            queryset = queryset.filter(id__in=location_ids)\n        return queryset\n\nregister_view(PlaceViewSet, 'place')\n\n\nclass OpeningHoursSpecificationSerializer(LinkedEventsSerializer):\n    class Meta:\n        model = OpeningHoursSpecification\n\n\nclass LanguageSerializer(LinkedEventsSerializer):\n    view_name = 'language-detail'\n\n    class Meta:\n        model = Language\n\n\nclass LanguageViewSet(viewsets.ReadOnlyModelViewSet):\n    queryset = Language.objects.all()\n    serializer_class = LanguageSerializer\n\nregister_view(LanguageViewSet, 'language')\n\nLOCAL_TZ = pytz.timezone(settings.TIME_ZONE)\n\nclass EventLinkSerializer(serializers.ModelSerializer):\n    def to_representation(self, obj):\n        ret = super(EventLinkSerializer, self).to_representation(obj)\n        if not ret['name']:\n            ret['name'] = None\n        return ret\n\n    class Meta:\n        model = EventLink\n        exclude = ['id', 'event']\n\nclass OfferSerializer(TranslatedModelSerializer):\n    class Meta:\n        model = Offer\n        exclude = ['id', 'event']\n\n\nclass EventSerializer(LinkedEventsSerializer, GeoModelAPIView):\n    location = JSONLDRelatedField(serializer=PlaceSerializer, required=False,\n                                  view_name='place-detail', read_only=True)\n    # provider = OrganizationSerializer(hide_ld_context=True)\n    keywords = JSONLDRelatedField(serializer=KeywordSerializer, many=True,\n                                  required=False,\n                                  view_name='keyword-detail', read_only=True)\n    super_event = JSONLDRelatedField(required=False, view_name='event-detail',\n                                     read_only=True)\n    event_status = EnumChoiceField(Event.STATUSES)\n    external_links = EventLinkSerializer(many=True)\n    offers = OfferSerializer(many=True)\n    sub_events = JSONLDRelatedField(serializer='EventSerializer',\n                                    required=False, 
view_name='event-detail',\n many=True, read_only=True)\n\n view_name = 'event-detail'\n\n def __init__(self, *args, skip_empties=False, skip_fields=set(), **kwargs):\n super(EventSerializer, self).__init__(*args, **kwargs)\n # The following can be used when serializing when\n # testing and debugging.\n self.skip_empties = skip_empties\n self.skip_fields = skip_fields\n\n def get_location(self, data):\n \"\"\"\n Replace location id dict in data with a Place object\n \"\"\"\n location = data.get('location')\n if location and '@id' in location:\n location_id = parse_id_from_uri(location['@id'])\n try:\n data['location'] = Place.objects.get(id=location_id)\n except Place.DoesNotExist:\n err = 'Place with id {} does not exist'\n raise ParseError(err.format(location_id))\n return data\n\n def get_keywords(self, data):\n \"\"\"\n Replace list of keyword dicts in data with a list of Keyword objects\n \"\"\"\n new_kw = []\n\n for kw in data.get('keywords', []):\n\n if '@id' in kw:\n kw_id = parse_id_from_uri(kw['@id'])\n\n try:\n keyword = Keyword.objects.get(id=kw_id)\n except Keyword.DoesNotExist:\n err = 'Keyword with id {} does not exist'\n raise ParseError(err.format(kw_id))\n\n new_kw.append(keyword)\n\n data['keywords'] = new_kw\n return data\n\n def get_datetimes(self, data):\n for field in ['date_published', 'start_time', 'end_time']:\n val = data.get(field, None)\n if val:\n if isinstance(val, str):\n data[field] = parse_time(val, True)\n return data\n\n def to_internal_value(self, data):\n data = super().to_internal_value(data)\n\n # TODO: figure out how to get this via JSONLDRelatedField\n if 'location' in data:\n location_id = parse_id_from_uri(data['location']['@id'])\n data['location'] = Place.objects.get(id=location_id)\n\n # TODO: figure out how to get these via JSONLDRelatedField\n data = self.get_keywords(data)\n\n return data\n\n def create(self, validated_data):\n offers = validated_data.pop('offers', [])\n links = validated_data.pop('external_links', [])\n keywords = validated_data.pop('keywords', [])\n\n # create object\n e = Event.objects.create(**validated_data)\n\n # create and add related objects \n for offer in offers:\n Offer.objects.create(event=e, **offer)\n for link in links:\n EventLink.objects.create(event=e, **link)\n e.keywords.add(*keywords)\n\n return e\n\n def update(self, instance, validated_data):\n\n # prepare a list of fields to be updated\n update_fields = [\n 'start_time', 'end_time', 'location'\n ]\n\n languages = [x[0] for x in settings.LANGUAGES]\n for field in EventTranslationOptions.fields:\n for lang in languages:\n update_fields.append(field + '_' + lang)\n\n # update values\n for field in update_fields:\n orig_value = getattr(instance, field)\n new_value = validated_data.get(field, orig_value)\n setattr(instance, field, new_value)\n\n # also update `has_end_time` if needed\n if instance.end_time:\n instance.has_end_time = True\n\n # save changes\n instance.save()\n\n # update offers\n if 'offers' in validated_data:\n instance.offers.all().delete()\n for offer in validated_data.get('offers', []):\n Offer.objects.create(event=instance, **offer)\n\n # update ext links\n if 'external_links' in validated_data:\n instance.external_links.all().delete()\n for link in validated_data.get('external_links', []):\n EventLink.objects.create(event=instance, **link)\n\n # update keywords\n instance.keywords.clear() \n instance.keywords.add(*validated_data['keywords'])\n\n return instance\n\n def to_representation(self, obj):\n ret = super(EventSerializer, 
self).to_representation(obj)\n if 'start_time' in ret and not obj.has_start_time:\n # Return only the date part\n ret['start_time'] = obj.start_time.astimezone(LOCAL_TZ).strftime('%Y-%m-%d')\n if 'end_time' in ret and not obj.has_end_time:\n # If we're storing only the date part, do not pretend we have the exact time.\n if obj.end_time - obj.start_time <= timedelta(days=1):\n ret['end_time'] = None\n if hasattr(obj, 'days_left'):\n ret['days_left'] = int(obj.days_left)\n if self.skip_empties:\n for k in list(ret.keys()):\n val = ret[k]\n try:\n if val is None or len(val) == 0:\n del ret[k]\n except TypeError:\n # not list/dict\n pass\n for field in self.skip_fields:\n del ret[field]\n return ret\n\n class Meta:\n model = Event\n exclude = ['has_start_time', 'has_end_time', 'is_recurring_super']\n\n\ndef parse_time(time_str, is_start):\n time_str = time_str.strip()\n # Handle dates first. Assume dates are given in local timezone.\n # FIXME: What if there's no local timezone?\n try:\n dt = datetime.strptime(time_str, '%Y-%m-%d')\n dt = LOCAL_TZ.localize(dt)\n except ValueError:\n dt = None\n if not dt:\n if time_str.lower() == 'today':\n dt = datetime.utcnow().replace(tzinfo=pytz.utc)\n dt = dt.astimezone(LOCAL_TZ)\n dt = dt.replace(hour=0, minute=0, second=0, microsecond=0)\n if dt:\n # With start timestamps, we treat dates as beginning\n # at midnight the same day. End timestamps are taken to\n # mean midnight on the following day.\n if not is_start:\n dt = dt + timedelta(days=1)\n else:\n try:\n # Handle all other times through dateutil.\n dt = dateutil_parse(time_str)\n except (TypeError, ValueError):\n raise ParseError('time in invalid format (try ISO 8601 or yyyy-mm-dd)')\n return dt\n\n\nclass JSONAPIViewSet(viewsets.ReadOnlyModelViewSet):\n def initial(self, request, *args, **kwargs):\n ret = super(JSONAPIViewSet, self).initial(request, *args, **kwargs)\n self.srs = srid_to_srs(self.request.QUERY_PARAMS.get('srid', None))\n return ret\n\n def get_serializer_context(self):\n context = super(JSONAPIViewSet, self).get_serializer_context()\n\n include = self.request.QUERY_PARAMS.get('include', '')\n context['include'] = [x.strip() for x in include.split(',') if x]\n context['srs'] = self.srs\n\n return context\n\n\nclass LinkedEventsOrderingFilter(filters.OrderingFilter):\n ordering_param = 'sort'\n\n\nclass EventOrderingFilter(LinkedEventsOrderingFilter):\n def filter_queryset(self, request, queryset, view):\n queryset = super(EventOrderingFilter, self).filter_queryset(request, queryset, view)\n ordering = self.get_ordering(request, queryset, view)\n if not ordering:\n ordering = []\n if 'days_left' in [x.lstrip('-') for x in ordering]:\n queryset = queryset.extra(select={'days_left': 'date_part(\\'day\\', end_time - start_time)'})\n return queryset\n\n\ndef parse_duration(duration):\n m = re.match(r'(\\d+)\\s*(d|h|m|s)?$', duration.strip().lower())\n if not m:\n raise ParseError(\"Invalid duration supplied. Try '1d' or '2h'.\")\n val, unit = m.groups()\n if not unit:\n unit = 's'\n\n if unit == 'm':\n mul = 60\n elif unit == 'h':\n mul = 3600\n elif unit == 'd':\n mul = 24 * 3600\n\n return int(val) * mul\n\ndef _filter_event_queryset(queryset, params, srs=None):\n \"\"\"\n Filter events queryset by params\n (e.g. self.request.QUERY_PARAMS in EventViewSet)\n \"\"\"\n # Filter by string (case insensitive). 
This searches from all fields\n # which are marked translatable in translation.py\n val = params.get('text', None)\n if val:\n val = val.lower()\n # Free string search from all translated fields\n fields = EventTranslationOptions.fields\n # and these languages\n languages = [x[0] for x in settings.LANGUAGES]\n qset = Q()\n for field in fields:\n for lang in languages:\n kwarg = {field + '_' + lang + '__icontains': val}\n qset |= Q(**kwarg)\n queryset = queryset.filter(qset)\n\n val = params.get('last_modified_since', None)\n # This should be in format which dateutil.parser recognizes, e.g.\n # 2014-10-29T12:00:00Z == 2014-10-29T12:00:00+0000 (UTC time)\n # or 2014-10-29T12:00:00+0200 (local time)\n if val:\n dt = parse_time(val, is_start=False)\n queryset = queryset.filter(Q(last_modified_time__gte=dt))\n\n val = params.get('start', None)\n if val:\n dt = parse_time(val, is_start=True)\n queryset = queryset.filter(Q(end_time__gt=dt) | Q(start_time__gte=dt))\n\n val = params.get('end', None)\n if val:\n dt = parse_time(val, is_start=False)\n queryset = queryset.filter(Q(end_time__lt=dt) | Q(start_time__lte=dt))\n\n val = params.get('bbox', None)\n if val:\n bbox_filter = build_bbox_filter(srs, val, 'position')\n places = Place.geo_objects.filter(**bbox_filter)\n queryset = queryset.filter(location__in=places)\n\n val = params.get('data_source', None)\n if val:\n queryset = queryset.filter(data_source=val)\n\n # Filter by location id, multiple ids separated by comma\n val = params.get('location', None)\n if val:\n val = val.split(',')\n queryset = queryset.filter(location_id__in=val)\n\n # Filter by keyword id, multiple ids separated by comma\n val = params.get('keyword', None)\n if val:\n val = val.split(',')\n queryset = queryset.filter(keywords__pk__in=val)\n\n # Filter only super or sub events if recurring has value\n val = params.get('recurring', None)\n if val:\n val = val.lower()\n if val == 'super':\n queryset = queryset.filter(is_recurring_super=True)\n elif val == 'sub':\n queryset = queryset.filter(is_recurring_super=False)\n\n val = params.get('max_duration', None)\n if val:\n dur = parse_duration(val)\n cond = 'end_time - start_time <= %s :: interval'\n queryset = queryset.extra(where=[cond], params=[str(dur)])\n\n val = params.get('min_duration', None)\n if val:\n dur = parse_duration(val)\n cond = 'end_time - start_time >= %s :: interval'\n queryset = queryset.extra(where=[cond], params=[str(dur)])\n\n return queryset\n\n\nclass EventViewSet(viewsets.ModelViewSet, JSONAPIViewSet):\n \"\"\"\n # Filtering retrieved events\n\n Query parameters can be used to filter the retrieved events by\n the following criteria.\n\n ## Event time\n\n Use `start` and `end` to restrict the date range of returned events.\n Any events that intersect with the given date range will be returned.\n\n The parameters `start` and `end` can be given in the following formats:\n\n - ISO 8601 (including the time of day)\n - yyyy-mm-dd\n\n In addition, `today` can be used as the value.\n\n Example:\n\n event/?start=2014-01-15&end=2014-01-20\n\n [See the result](?start=2014-01-15&end=2014-01-20 \"json\")\n\n ## Event location\n\n ### Bounding box\n\n To restrict the retrieved events to a geographical region, use\n the query parameter `bbox` in the format\n\n bbox=west,south,east,north\n\n Where `west` is the longitude of the rectangle's western boundary,\n `south` is the latitude of the rectangle's southern boundary,\n and so on.\n\n Example:\n\n event/?bbox=24.9348,60.1762,24.9681,60.1889\n\n [See the 
result](?bbox=24.9348,60.1762,24.9681,60.1889 \"json\")\n\n # Getting detailed data\n\n In the default case, keywords, locations, and other fields that\n refer to separate resources are only displayed as simple references.\n\n If you want to include the complete data from related resources in\n the current response, use the keyword `include`. For example:\n\n event/?include=location,keywords\n\n [See the result](?include=location,keywords \"json\")\n\n # Response data for the current URL\n\n \"\"\"\n queryset = Event.objects.all()\n # Use select_ and prefetch_related() to reduce the amount of queries\n queryset = queryset.select_related('location')\n queryset = queryset.prefetch_related(\n 'offers', 'keywords', 'external_links', 'sub_events')\n serializer_class = EventSerializer\n filter_backends = (EventOrderingFilter,)\n ordering_fields = ('start_time', 'end_time', 'days_left')\n\n def get_object(self):\n # Overridden to prevent queryset filtering from being applied\n # outside list views.\n return get_object_or_404(Event.objects.all(), pk=self.kwargs['pk'])\n\n def filter_queryset(self, queryset):\n \"\"\"\n TODO: convert to use proper filter framework\n \"\"\"\n\n queryset = super(EventViewSet, self).filter_queryset(queryset)\n\n if 'show_all' not in self.request.QUERY_PARAMS:\n queryset = queryset.filter(\n Q(event_status=Event.SCHEDULED)\n )\n queryset = _filter_event_queryset(queryset, self.request.QUERY_PARAMS,\n srs=self.srs)\n return queryset\n\n\n def get_authorized_publisher(self, request, data):\n user = request.user\n\n # require user\n assert user.is_authenticated(), 'User needs to be authenticated.'\n\n # require permission to publish\n objs = user.organizations.all()\n assert objs, 'User needs to be authorized to publish events.'\n assert objs.count() == 1, (\n 'User is connected to multiple organizations. 
This is currently '\n 'not supported.'\n )\n\n # pick publisher\n data['publisher'] = objs.first().id\n return data\n\n def create(self, request, *args, **kwargs):\n data = request.data\n\n # all events created by api are marked coming from the system data\n # source\n data['data_source'] = SYSTEM_DATA_SOURCE_ID\n\n # get publisher from the auth user\n data = self.get_authorized_publisher(request, data)\n\n # generate event id\n data = perform_id_magic_for(data)\n\n # then do the usual stuff defined in `rest_framework.CreateModelMixin`\n serializer = self.get_serializer(data=data)\n serializer.is_valid(raise_exception=True)\n\n self.perform_create(serializer)\n\n return Response(\n serializer.data,\n status=status.HTTP_201_CREATED,\n headers=self.get_success_headers(serializer.data)\n )\n\n\nregister_view(EventViewSet, 'event')\n\n\nclass SearchSerializer(serializers.Serializer):\n def to_representation(self, search_result):\n model = search_result.model\n assert model in serializers_by_model, \"Serializer for %s not found\" % model\n ser_class = serializers_by_model[model]\n data = ser_class(search_result.object, context=self.context).data\n data['object_type'] = model._meta.model_name\n data['score'] = search_result.score\n return data\n\nDATE_DECAY_SCALE = '30d'\n\nclass SearchViewSet(GeoModelAPIView, viewsets.ViewSetMixin, generics.ListAPIView):\n serializer_class = SearchSerializer\n\n def list(self, request, *args, **kwargs):\n languages = [x[0] for x in settings.LANGUAGES]\n\n # If the incoming language is not specified, go with the default.\n self.lang_code = request.QUERY_PARAMS.get('language', languages[0])\n if self.lang_code not in languages:\n raise ParseError(\"Invalid language supplied. Supported languages: %s\" %\n ','.join(languages))\n\n input_val = request.QUERY_PARAMS.get('input', '').strip()\n q_val = request.QUERY_PARAMS.get('q', '').strip()\n if not input_val and not q_val:\n raise ParseError(\"Supply search terms with 'q=' or autocomplete entry with 'input='\")\n if input_val and q_val:\n raise ParseError(\"Supply either 'q' or 'input', not both\")\n\n old_language = translation.get_language()[:2]\n translation.activate(self.lang_code)\n\n queryset = SearchQuerySet()\n if input_val:\n queryset = queryset.filter(autosuggest=input_val)\n now = datetime.utcnow()\n queryset = queryset.filter(end_time__gt=now).decay({\n 'gauss': {\n 'end_time': {\n 'origin': now,\n 'scale': DATE_DECAY_SCALE }}})\n else:\n queryset = queryset.filter(text=AutoQuery(q_val))\n\n self.object_list = queryset.load_all()\n\n page = self.paginate_queryset(self.object_list)\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n\n serializer = self.get_serializer(self.object_list, many=True)\n resp = Response(serializer.data)\n\n translation.activate(old_language)\n\n return resp\n\n\nregister_view(SearchViewSet, 'search', base_name='search')\n", "sub_path": "events/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 37180, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "urllib.parse.parse.quote", "line_number": 85, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 85, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 85, "usage_type": "name"}, {"api_name": "time.time", "line_number": 91, "usage_type": "call"}, {"api_name": "base64.b32encode", "line_number": 92, "usage_type": "call"}, 
{"api_name": "struct.pack", "line_number": 92, "usage_type": "call"}, {"api_name": "urllib.parse.parse.urlparse", "line_number": 105, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 105, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 105, "usage_type": "name"}, {"api_name": "urllib.parse.parse.unquote", "line_number": 107, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 107, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 107, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.ParseError", "line_number": 113, "usage_type": "call"}, {"api_name": "rest_framework.relations.HyperlinkedRelatedField", "line_number": 118, "usage_type": "attribute"}, {"api_name": "rest_framework.relations", "line_number": 118, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 128, "usage_type": "call"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 157, "usage_type": "call"}, {"api_name": "rest_framework.serializers.Field", "line_number": 164, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 164, "usage_type": "name"}, {"api_name": "events.utils.get_value_from_tuple_list", "line_number": 181, "usage_type": "call"}, {"api_name": "events.utils", "line_number": 181, "usage_type": "name"}, {"api_name": "events.utils.get_value_from_tuple_list", "line_number": 185, "usage_type": "call"}, {"api_name": "events.utils", "line_number": 185, "usage_type": "name"}, {"api_name": "rest_framework.serializers.Field", "line_number": 189, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 189, "usage_type": "name"}, {"api_name": "isodate.Duration", "line_number": 193, "usage_type": "call"}, {"api_name": "isodate.duration_isoformat", "line_number": 194, "usage_type": "call"}, {"api_name": "isodate.parse_duration", "line_number": 200, "usage_type": "call"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 210, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 210, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 218, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 218, "usage_type": "name"}, {"api_name": "modeltranslation.translator.translator.get_options_for_model", "line_number": 223, "usage_type": "call"}, {"api_name": "modeltranslation.translator.translator", "line_number": 223, "usage_type": "name"}, {"api_name": "modeltranslation.translator.NotRegistered", "line_number": 224, "usage_type": "name"}, {"api_name": "django.conf.settings.LANGUAGES", "line_number": 229, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 229, "usage_type": "name"}, {"api_name": "django.conf.settings.LANGUAGES", "line_number": 279, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 279, "usage_type": "name"}, {"api_name": "django.conf.settings.LANGUAGES", "line_number": 282, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 282, "usage_type": "name"}, {"api_name": "django.conf.settings.LANGUAGES", "line_number": 294, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 294, "usage_type": "name"}, {"api_name": "django.conf.settings.LANGUAGES", "line_number": 313, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 313, 
"usage_type": "name"}, {"api_name": "django.conf.settings.LANGUAGES", "line_number": 315, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 315, "usage_type": "name"}, {"api_name": "rest_framework.relations.ManyRelatedField", "line_number": 369, "usage_type": "attribute"}, {"api_name": "rest_framework.relations", "line_number": 369, "usage_type": "name"}, {"api_name": "rest_framework.reverse.reverse", "line_number": 395, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.NoReverseMatch", "line_number": 398, "usage_type": "name"}, {"api_name": "events.models.Keyword", "line_number": 447, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ReadOnlyModelViewSet", "line_number": 450, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 450, "usage_type": "name"}, {"api_name": "events.models.Keyword.objects.all", "line_number": 451, "usage_type": "call"}, {"api_name": "events.models.Keyword.objects", "line_number": 451, "usage_type": "attribute"}, {"api_name": "events.models.Keyword", "line_number": 451, "usage_type": "name"}, {"api_name": "events.models.Keyword.objects.all", "line_number": 463, "usage_type": "call"}, {"api_name": "events.models.Keyword.objects", "line_number": 463, "usage_type": "attribute"}, {"api_name": "events.models.Keyword", "line_number": 463, "usage_type": "name"}, {"api_name": "events.models.Event.objects.all", "line_number": 471, "usage_type": "call"}, {"api_name": "events.models.Event.objects", "line_number": 471, "usage_type": "attribute"}, {"api_name": "events.models.Event", "line_number": 471, "usage_type": "name"}, {"api_name": "events.values_list", "line_number": 474, "usage_type": "call"}, {"api_name": "munigeo.api.GeoModelSerializer", "line_number": 487, "usage_type": "name"}, {"api_name": "events.models.Place", "line_number": 491, "usage_type": "name"}, {"api_name": "munigeo.api.GeoModelAPIView", "line_number": 494, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ReadOnlyModelViewSet", "line_number": 494, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 494, "usage_type": "name"}, {"api_name": "events.models.Place.objects.all", "line_number": 495, "usage_type": "call"}, {"api_name": "events.models.Place.objects", "line_number": 495, "usage_type": "attribute"}, {"api_name": "events.models.Place", "line_number": 495, "usage_type": "name"}, {"api_name": "events.models.Place.objects.all", "line_number": 507, "usage_type": "call"}, {"api_name": "events.models.Place.objects", "line_number": 507, "usage_type": "attribute"}, {"api_name": "events.models.Place", "line_number": 507, "usage_type": "name"}, {"api_name": "events.models.Event.objects.all", "line_number": 511, "usage_type": "call"}, {"api_name": "events.models.Event.objects", "line_number": 511, "usage_type": "attribute"}, {"api_name": "events.models.Event", "line_number": 511, "usage_type": "name"}, {"api_name": "events.values_list", "line_number": 514, "usage_type": "call"}, {"api_name": "events.models.OpeningHoursSpecification", "line_number": 524, "usage_type": "name"}, {"api_name": "events.models.Language", "line_number": 531, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ReadOnlyModelViewSet", "line_number": 534, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 534, "usage_type": "name"}, {"api_name": "events.models.Language.objects.all", "line_number": 535, "usage_type": "call"}, {"api_name": "events.models.Language.objects", 
"line_number": 535, "usage_type": "attribute"}, {"api_name": "events.models.Language", "line_number": 535, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 540, "usage_type": "call"}, {"api_name": "django.conf.settings.TIME_ZONE", "line_number": 540, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 540, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 542, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 542, "usage_type": "name"}, {"api_name": "events.models.EventLink", "line_number": 550, "usage_type": "name"}, {"api_name": "events.models.Offer", "line_number": 555, "usage_type": "name"}, {"api_name": "munigeo.api.GeoModelAPIView", "line_number": 559, "usage_type": "name"}, {"api_name": "events.models.Event.STATUSES", "line_number": 568, "usage_type": "attribute"}, {"api_name": "events.models.Event", "line_number": 568, "usage_type": "name"}, {"api_name": "events.models.Place.objects.get", "line_number": 592, "usage_type": "call"}, {"api_name": "events.models.Place.objects", "line_number": 592, "usage_type": "attribute"}, {"api_name": "events.models.Place", "line_number": 592, "usage_type": "name"}, {"api_name": "events.models.Place.DoesNotExist", "line_number": 593, "usage_type": "attribute"}, {"api_name": "events.models.Place", "line_number": 593, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.ParseError", "line_number": 595, "usage_type": "call"}, {"api_name": "events.models.Keyword.objects.get", "line_number": 610, "usage_type": "call"}, {"api_name": "events.models.Keyword.objects", "line_number": 610, "usage_type": "attribute"}, {"api_name": "events.models.Keyword", "line_number": 610, "usage_type": "name"}, {"api_name": "events.models.Keyword.DoesNotExist", "line_number": 611, "usage_type": "attribute"}, {"api_name": "events.models.Keyword", "line_number": 611, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.ParseError", "line_number": 613, "usage_type": "call"}, {"api_name": "events.models.Place.objects.get", "line_number": 634, "usage_type": "call"}, {"api_name": "events.models.Place.objects", "line_number": 634, "usage_type": "attribute"}, {"api_name": "events.models.Place", "line_number": 634, "usage_type": "name"}, {"api_name": "events.models.Event.objects.create", "line_number": 647, "usage_type": "call"}, {"api_name": "events.models.Event.objects", "line_number": 647, "usage_type": "attribute"}, {"api_name": "events.models.Event", "line_number": 647, "usage_type": "name"}, {"api_name": "events.models.Offer.objects.create", "line_number": 651, "usage_type": "call"}, {"api_name": "events.models.Offer.objects", "line_number": 651, "usage_type": "attribute"}, {"api_name": "events.models.Offer", "line_number": 651, "usage_type": "name"}, {"api_name": "events.models.EventLink.objects.create", "line_number": 653, "usage_type": "call"}, {"api_name": "events.models.EventLink.objects", "line_number": 653, "usage_type": "attribute"}, {"api_name": "events.models.EventLink", "line_number": 653, "usage_type": "name"}, {"api_name": "django.conf.settings.LANGUAGES", "line_number": 665, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 665, "usage_type": "name"}, {"api_name": "events.translation.EventTranslationOptions.fields", "line_number": 666, "usage_type": "attribute"}, {"api_name": "events.translation.EventTranslationOptions", "line_number": 666, "usage_type": "name"}, {"api_name": 
"events.models.Offer.objects.create", "line_number": 687, "usage_type": "call"}, {"api_name": "events.models.Offer.objects", "line_number": 687, "usage_type": "attribute"}, {"api_name": "events.models.Offer", "line_number": 687, "usage_type": "name"}, {"api_name": "events.models.EventLink.objects.create", "line_number": 693, "usage_type": "call"}, {"api_name": "events.models.EventLink.objects", "line_number": 693, "usage_type": "attribute"}, {"api_name": "events.models.EventLink", "line_number": 693, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 708, "usage_type": "call"}, {"api_name": "events.models.Event", "line_number": 726, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 735, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 735, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 741, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 741, "usage_type": "name"}, {"api_name": "pytz.utc", "line_number": 741, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 749, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 753, "usage_type": "call"}, {"api_name": "rest_framework.exceptions.ParseError", "line_number": 755, "usage_type": "call"}, {"api_name": "rest_framework.viewsets.ReadOnlyModelViewSet", "line_number": 759, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 759, "usage_type": "name"}, {"api_name": "munigeo.api.srid_to_srs", "line_number": 762, "usage_type": "call"}, {"api_name": "rest_framework.filters.OrderingFilter", "line_number": 775, "usage_type": "attribute"}, {"api_name": "rest_framework.filters", "line_number": 775, "usage_type": "name"}, {"api_name": "re.match", "line_number": 791, "usage_type": "call"}, {"api_name": "rest_framework.exceptions.ParseError", "line_number": 793, "usage_type": "call"}, {"api_name": "events.translation.EventTranslationOptions.fields", "line_number": 818, "usage_type": "attribute"}, {"api_name": "events.translation.EventTranslationOptions", "line_number": 818, "usage_type": "name"}, {"api_name": "django.conf.settings.LANGUAGES", "line_number": 820, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 820, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 821, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 825, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 834, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 839, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 844, "usage_type": "call"}, {"api_name": "munigeo.api.build_bbox_filter", "line_number": 848, "usage_type": "call"}, {"api_name": "events.models.Place.geo_objects.filter", "line_number": 849, "usage_type": "call"}, {"api_name": "events.models.Place.geo_objects", "line_number": 849, "usage_type": "attribute"}, {"api_name": "events.models.Place", "line_number": 849, "usage_type": "name"}, {"api_name": "isodate.parse_duration", "line_number": 879, "usage_type": "call"}, {"api_name": "isodate.parse_duration", "line_number": 885, "usage_type": "call"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 892, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 892, "usage_type": "name"}, {"api_name": "events.models.Event.objects.all", "line_number": 951, "usage_type": "call"}, 
{"api_name": "events.models.Event.objects", "line_number": 951, "usage_type": "attribute"}, {"api_name": "events.models.Event", "line_number": 951, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 963, "usage_type": "call"}, {"api_name": "events.models.Event.objects.all", "line_number": 963, "usage_type": "call"}, {"api_name": "events.models.Event.objects", "line_number": 963, "usage_type": "attribute"}, {"api_name": "events.models.Event", "line_number": 963, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 974, "usage_type": "call"}, {"api_name": "events.models.Event.SCHEDULED", "line_number": 974, "usage_type": "attribute"}, {"api_name": "events.models.Event", "line_number": 974, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 1018, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 1020, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 1020, "usage_type": "name"}, {"api_name": "rest_framework.serializers.Serializer", "line_number": 1028, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 1028, "usage_type": "name"}, {"api_name": "munigeo.api.GeoModelAPIView", "line_number": 1040, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ViewSetMixin", "line_number": 1040, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 1040, "usage_type": "name"}, {"api_name": "rest_framework.generics.ListAPIView", "line_number": 1040, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 1040, "usage_type": "name"}, {"api_name": "django.conf.settings.LANGUAGES", "line_number": 1044, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 1044, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.ParseError", "line_number": 1049, "usage_type": "call"}, {"api_name": "rest_framework.exceptions.ParseError", "line_number": 1055, "usage_type": "call"}, {"api_name": "rest_framework.exceptions.ParseError", "line_number": 1057, "usage_type": "call"}, {"api_name": "django.utils.translation.get_language", "line_number": 1059, "usage_type": "call"}, {"api_name": "django.utils.translation", "line_number": 1059, "usage_type": "name"}, {"api_name": "django.utils.translation.activate", "line_number": 1060, "usage_type": "call"}, {"api_name": "django.utils.translation", "line_number": 1060, "usage_type": "name"}, {"api_name": "events.custom_elasticsearch_search_backend.CustomEsSearchQuerySet", "line_number": 1062, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 1065, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1065, "usage_type": "name"}, {"api_name": "haystack.query.AutoQuery", "line_number": 1072, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 1082, "usage_type": "call"}, {"api_name": "django.utils.translation.activate", "line_number": 1084, "usage_type": "call"}, {"api_name": "django.utils.translation", "line_number": 1084, "usage_type": "name"}]} +{"seq_id": "277497372", "text": "import http.server\nimport socketserver\nimport threading\nimport socketserver\n\ndef create_proxy_server():\n class RedirectServer(http.server.SimpleHTTPRequestHandler):\n def do_GET(self):\n self.send_response(301)\n self.send_header(\"Location\",\"http://www.meatspin.com\")\n self.end_headers()\n\n return RedirectServer\n\n\ndef 
set_up_proxy():\n print(\"Created port\")\n redirectHandler = create_proxy_server()\n handler = socketserver.TCPServer((\"127.0.0.1\", 8000), redirectHandler)\n print(\"serving at port 8000\")\n handler.serve_forever()\n\n\nif __name__ == \"__main__\":\n main_thread = threading.Thread(target=set_up_proxy)\n main_thread.setDaemon(False)\n main_thread.start()\n", "sub_path": "bones.py", "file_name": "bones.py", "file_ext": "py", "file_size_in_byte": 726, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "http.server.server", "line_number": 7, "usage_type": "attribute"}, {"api_name": "http.server", "line_number": 7, "usage_type": "name"}, {"api_name": "socketserver.TCPServer", "line_number": 19, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "366594310", "text": "from django import http\nfrom django.shortcuts import render\n\n# Create your views here.\nfrom django.views import View\nfrom django.core.paginator import Paginator\nfrom indexs.models import VideoModel\nimport random\nfrom indexs.models import ClassificationModel\n\nfrom indexs.models import MovieDelailModel\n\n\nclass VideoView(View):\n def get(self, request, view_id):\n try:\n video_delail = MovieDelailModel.objects.get(video_id=view_id)\n except:\n video_delail = ''\n try:\n video = VideoModel.objects.get(id=view_id)\n categroy = ClassificationModel.objects.filter(video=view_id)\n categroy = [i.categroy for i in categroy]\n reco = ClassificationModel.objects.filter(categroy__in=categroy)\n reco_video = [res.video for res in reco]\n video_order = VideoModel.objects.all().order_by(\"-activate\")[:8]#7376-1\n\n except Exception as e:\n return http.HttpResponseNotFound(e)\n page = Paginator(reco_video,10)\n pagesize = page.num_pages\n intpage = random.randint(0,pagesize)\n reco_video = [] if intpage==0 else page.page(intpage)\n\n\n context = {\n \"video\":video,\n \"categroy\":categroy,\n \"reco_video\":reco_video,\n \"video_order\":video_order,\n \"video_delail\":video_delail,\n }\n return render(request, \"views.html\",context=context)", "sub_path": "iQIYI/iQIYI/apps/videoviews/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1436, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "django.views.View", "line_number": 14, "usage_type": "name"}, {"api_name": "indexs.models.MovieDelailModel.objects.get", "line_number": 17, "usage_type": "call"}, {"api_name": "indexs.models.MovieDelailModel.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "indexs.models.MovieDelailModel", "line_number": 17, "usage_type": "name"}, {"api_name": "indexs.models.VideoModel.objects.get", "line_number": 21, "usage_type": "call"}, {"api_name": "indexs.models.VideoModel.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "indexs.models.VideoModel", "line_number": 21, "usage_type": "name"}, {"api_name": "indexs.models.ClassificationModel.objects.filter", "line_number": 22, "usage_type": "call"}, {"api_name": "indexs.models.ClassificationModel.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "indexs.models.ClassificationModel", "line_number": 22, "usage_type": "name"}, {"api_name": "indexs.models.ClassificationModel.objects.filter", "line_number": 24, "usage_type": "call"}, {"api_name": "indexs.models.ClassificationModel.objects", "line_number": 24, "usage_type": 
"attribute"}, {"api_name": "indexs.models.ClassificationModel", "line_number": 24, "usage_type": "name"}, {"api_name": "indexs.models.VideoModel.objects.all", "line_number": 26, "usage_type": "call"}, {"api_name": "indexs.models.VideoModel.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "indexs.models.VideoModel", "line_number": 26, "usage_type": "name"}, {"api_name": "django.http.HttpResponseNotFound", "line_number": 29, "usage_type": "call"}, {"api_name": "django.http", "line_number": 29, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 30, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 32, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "435712690", "text": "\"\"\"School URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\r\nExamples:\r\nFunction views\r\n 1. Add an import: from my_app import views\r\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\r\nClass-based views\r\n 1. Add an import: from other_app.views import Home\r\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n 1. Import the include() function: from django.urls import include, path\r\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.contrib import admin\r\nfrom django.urls import path\r\nfrom django.contrib.auth import views as auth_views\r\n\r\nfrom users import views\r\n\r\nurlpatterns = [\r\n path('admin/', admin.site.urls),\r\n path('', views.home, name='home'),\r\n path('register/', views.register, name='register'),\r\n\r\n path(\"login/\", views.login_request, name='login'),\r\n path('logout/', views.logout_request, name='logout'),\r\n\r\n path('welcome/', views.welcome_page, name='welcome'),\r\n path('quiz/', views.quiz_page, name='quiz'),\r\n path('result/', views.result, name='result'),\r\n\r\n path('full_result/', views.full_result, name='full_result')\r\n]", "sub_path": "School/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1290, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 23, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "users.views.home", "line_number": 24, "usage_type": "attribute"}, {"api_name": "users.views", "line_number": 24, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "users.views.register", "line_number": 25, "usage_type": "attribute"}, {"api_name": "users.views", "line_number": 25, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "users.views.login_request", "line_number": 27, "usage_type": "attribute"}, {"api_name": "users.views", "line_number": 27, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "users.views.logout_request", "line_number": 28, "usage_type": "attribute"}, {"api_name": "users.views", "line_number": 28, "usage_type": "name"}, {"api_name": "django.urls.path", 
"line_number": 30, "usage_type": "call"}, {"api_name": "users.views.welcome_page", "line_number": 30, "usage_type": "attribute"}, {"api_name": "users.views", "line_number": 30, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "users.views.quiz_page", "line_number": 31, "usage_type": "attribute"}, {"api_name": "users.views", "line_number": 31, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 32, "usage_type": "call"}, {"api_name": "users.views.result", "line_number": 32, "usage_type": "attribute"}, {"api_name": "users.views", "line_number": 32, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 34, "usage_type": "call"}, {"api_name": "users.views.full_result", "line_number": 34, "usage_type": "attribute"}, {"api_name": "users.views", "line_number": 34, "usage_type": "name"}]} +{"seq_id": "47697334", "text": "import cv2\r\nimport numpy as np\r\n\r\nimg = cv2.imread('panda.jpg',cv2.IMREAD_COLOR)\r\ncv2.line(img,(0,0),(150,150),(255,0,0),15) #drawing line\r\ncv2.rectangle(img,(250,250),(750,500),(0,0,255),10) #drawing rectangle\r\ncv2.circle(img,(640,320),20,(0,0,255),-1) #drawing circle thickness -1 fills up the circle\r\n\r\npts= np.array([[50,100],[250,100],[960,500],[50,500],[750,250]],np.int32) # points of polygon\r\ncv2.polylines(img,[pts],True,(255,255,0),3) #drawing a polygon\r\n\r\nfont = cv2.FONT_HERSHEY_SIMPLEX # font\r\ncv2.putText(img,'TEST',(750,600),font,3,(0,255,255),6,cv2.LINE_AA) #writing text\r\n\r\ncv2.imshow('image',img)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n", "sub_path": "Test/plotting.py", "file_name": "plotting.py", "file_ext": "py", "file_size_in_byte": 659, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "cv2.imread", "line_number": 4, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 4, "usage_type": "attribute"}, {"api_name": "cv2.line", "line_number": 5, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 9, "usage_type": "attribute"}, {"api_name": "cv2.polylines", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 13, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "308936967", "text": "from setuptools import setup\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(name='fhem',\n version='0.5.5',\n description='Python API for FHEM home automation server',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: MIT License',\n ],\n keywords='fhem home automation',\n url='http://github.com/domschl/python-fhem',\n author='Dominik Schloesser',\n author_email='dsc@dosc.net',\n license='MIT',\n packages=['fhem'],\n zip_safe=False)\n", "sub_path": "fhem/setup.py", "file_name": "setup.py", "file_ext": "py", 
"file_size_in_byte": 652, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "setuptools.setup", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "198504753", "text": "# ---------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# ---------------------------------------------------------\n\n# pylint: disable=protected-access\n\n\"\"\"Contains functionality for sending telemetry to Application Insights via OpenCensus Azure Monitor Exporter.\"\"\"\n\nimport logging\n\n# import platform\nfrom os import getenv\n\n# from opencensus.ext.azure.log_exporter import AzureLogHandler\n\n# from azure.ai.ml._user_agent import USER_AGENT\n\n\nAML_INTERNAL_LOGGER_NAMESPACE = \"azure.ai.ml._telemetry\"\n\n# vienna-sdk-unitedstates\nINSTRUMENTATION_KEY = \"71b954a8-6b7d-43f5-986c-3d3a6605d803\"\n\nAZUREML_SDKV2_TELEMETRY_OPTOUT_ENV_VAR = \"AZUREML_SDKV2_TELEMETRY_OPTOUT\"\n\n# application insight logger name\nLOGGER_NAME = \"ApplicationInsightLogger\"\n\nSUCCESS = True\nFAILURE = False\n\nTRACEBACK_LOOKUP_STR = \"Traceback (most recent call last)\"\n\n# extract traceback path from message\nreformat_traceback = True\n\ntest_subscriptions = [\n \"b17253fa-f327-42d6-9686-f3e553e24763\",\n \"test_subscription\",\n \"6560575d-fa06-4e7d-95fb-f962e74efd7a\",\n \"b17253fa-f327-42d6-9686-f3e553e2452\",\n \"74eccef0-4b8d-4f83-b5f9-fa100d155b22\",\n \"4faaaf21-663f-4391-96fd-47197c630979\",\n \"00000000-0000-0000-0000-000000000\",\n]\n\n\nclass CustomDimensionsFilter(logging.Filter):\n \"\"\"Add application-wide properties to AzureLogHandler records\"\"\"\n\n def __init__(self, custom_dimensions=None): # pylint: disable=super-init-not-called\n self.custom_dimensions = custom_dimensions or {}\n\n def filter(self, record):\n \"\"\"Adds the default custom_dimensions into the current log record\"\"\"\n custom_dimensions = self.custom_dimensions.copy()\n custom_dimensions.update(getattr(record, \"custom_dimensions\", {}))\n record.custom_dimensions = custom_dimensions\n\n return True\n\n\ndef in_jupyter_notebook() -> bool:\n \"\"\"\n Checks if user is using a Jupyter Notebook. 
This is necessary because logging is not allowed in\n non-Jupyter contexts.\n\n Adapted from https://stackoverflow.com/a/22424821\n \"\"\"\n try: # cspell:ignore ipython\n from IPython import get_ipython\n\n if \"IPKernelApp\" not in get_ipython().config:\n return False\n except ImportError:\n return False\n except AttributeError:\n return False\n return True\n\n\ndef is_telemetry_collection_disabled():\n telemetry_disabled = getenv(AZUREML_SDKV2_TELEMETRY_OPTOUT_ENV_VAR)\n if telemetry_disabled and (telemetry_disabled.lower() == \"true\" or telemetry_disabled == \"1\"):\n return True\n if not in_jupyter_notebook:\n return True\n return False\n\n\n# def get_appinsights_log_handler(\n# user_agent,\n# *args, # pylint: disable=unused-argument\n# instrumentation_key=None,\n# component_name=None,\n# **kwargs\n# ):\n# \"\"\"Enable the OpenCensus logging handler for specified logger and instrumentation key to send info to AppInsights.\n\n# :param user_agent: Information about the user's browser.\n# :type user_agent: Dict[str, str]\n# :param instrumentation_key: The Application Insights instrumentation key.\n# :type instrumentation_key: str\n# :param component_name: The component name.\n# :type component_name: str\n# :param args: Optional arguments for formatting messages.\n# :type args: list\n# :param kwargs: Optional keyword arguments for adding additional information to messages.\n# :type kwargs: dict\n# :return: The logging handler.\n# :rtype: opencensus.ext.azure.log_exporter.AzureLogHandler\n# \"\"\"\n# try:\n# if instrumentation_key is None:\n# instrumentation_key = INSTRUMENTATION_KEY\n\n# if is_telemetry_collection_disabled():\n# return logging.NullHandler()\n\n# if not user_agent or not user_agent.lower() == USER_AGENT.lower():\n# return logging.NullHandler()\n\n# if \"properties\" in kwargs and \"subscription_id\" in kwargs.get(\"properties\"):\n# if kwargs.get(\"properties\")[\"subscription_id\"] in test_subscriptions:\n# return logging.NullHandler()\n\n# child_namespace = component_name or __name__\n# current_logger = logging.getLogger(AML_INTERNAL_LOGGER_NAMESPACE).getChild(child_namespace)\n# current_logger.propagate = False\n# current_logger.setLevel(logging.CRITICAL)\n\n# custom_properties = {\"PythonVersion\": platform.python_version()}\n# custom_properties.update({\"user_agent\": user_agent})\n# if \"properties\" in kwargs:\n# custom_properties.update(kwargs.pop(\"properties\"))\n# handler = AzureLogHandler(connection_string=f'InstrumentationKey={instrumentation_key}')\n# current_logger.addHandler(handler)\n# handler.addFilter(CustomDimensionsFilter(custom_properties))\n\n# return handler\n# except Exception: # pylint: disable=broad-except\n# # ignore exceptions, telemetry should not block\n# return logging.NullHandler()\n", "sub_path": "sdk/ml/azure-ai-ml/azure/ai/ml/_telemetry/logging_handler.py", "file_name": "logging_handler.py", "file_ext": "py", "file_size_in_byte": 4913, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "logging.Filter", "line_number": 48, "usage_type": "attribute"}, {"api_name": "IPython.get_ipython", "line_number": 73, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "28370439", "text": "\"\"\"cofouter URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.8/topics/http/urls/\nExamples:\nFunction views\n 1. 
Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add an import: from blog import urls as blog_urls\n 2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))\n\"\"\"\nfrom django.conf.urls import include, url, patterns\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom cofouter import views\nfrom wiki.urls import get_pattern as get_wiki_pattern\nfrom django_nyt.urls import get_pattern as get_nyt_pattern\n\nurlpatterns = [\n url(r'^$', views.landing, name=\"landing\"),\n url(r'^register/$', views.register, name=\"register\"),\n url(r'^about/$', views.about, name=\"about\"),\n url(r'^evesso.*', views.ssologin, name=\"evesso\"),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^applications/', include('applications.urls', namespace='applications')),\n url(r'^srp/', include('srp.urls', namespace='srp')),\n url(r'^reddit/', include('subreddit.urls', namespace='subreddit')),\n url(r'^hipchat/', include('hipchat.urls', namespace='hipchat')),\n url(r'^timerboard/', include('timerboard.urls', namespace='timerboard')),\n url(r'^corpmarket/', include('corpmarket.urls', namespace=\"corpmarket\")),\n url(r'^helpdesk/', include('helpdesk.urls', namespace=\"helpdesk\")),\n url(r'^skillchecker/', include('skillchecker.urls', namespace=\"skillchecker\")),\n url(r'^wikinotifications/', get_nyt_pattern()),\n url(r'^wiki/', get_wiki_pattern()),\n url(r'^', include('core.urls', namespace='core')),\n]\n\nif settings.DEBUG:\n urlpatterns += staticfiles_urlpatterns()\n urlpatterns += patterns('',\n url(r'^media/(?P.*)$',\n 'django.views.static.serve',\n {'document_root': settings.MEDIA_ROOT,\n }),\n )", "sub_path": "cofouter/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 2310, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}, {"api_name": "cofouter.views.landing", "line_number": 25, "usage_type": "attribute"}, {"api_name": "cofouter.views", "line_number": 25, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 26, "usage_type": "call"}, {"api_name": "cofouter.views.register", "line_number": 26, "usage_type": "attribute"}, {"api_name": "cofouter.views", "line_number": 26, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 27, "usage_type": "call"}, {"api_name": "cofouter.views.about", "line_number": 27, "usage_type": "attribute"}, {"api_name": "cofouter.views", "line_number": 27, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 28, "usage_type": "call"}, {"api_name": "cofouter.views.ssologin", "line_number": 28, "usage_type": "attribute"}, {"api_name": "cofouter.views", "line_number": 28, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 29, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 29, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 29, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 29, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 30, "usage_type": "call"}, {"api_name": 
"django.conf.urls.include", "line_number": 30, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 31, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 31, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 32, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 32, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 33, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 33, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 34, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 34, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 35, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 35, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 36, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 36, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 37, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 37, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 38, "usage_type": "call"}, {"api_name": "django_nyt.urls.get_pattern", "line_number": 38, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 39, "usage_type": "call"}, {"api_name": "wiki.urls.get_pattern", "line_number": 39, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 40, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 40, "usage_type": "call"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 43, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 43, "usage_type": "name"}, {"api_name": "django.contrib.staticfiles.urls.staticfiles_urlpatterns", "line_number": 44, "usage_type": "call"}, {"api_name": "django.conf.urls.patterns", "line_number": 45, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 46, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 48, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 48, "usage_type": "name"}]} +{"seq_id": "211118085", "text": "import argparse\nimport json\nimport os\nimport nltk\nimport torch\nimport numpy as np\nimport tensorflow as tf\nfrom model import dcn_plus_model\nfrom nltk.tokenize.moses import MosesDetokenizer\nfrom preprocessing.cove_encoder import MTLSTM as CoveEncoder\n\ndef load_glove(filename):\n vocab_dict = {}\n embedding = []\n file = open(filename, 'r')\n for id, line in enumerate(file.readlines()):\n row = line.strip().split(' ')\n if len(row) != 301:\n continue\n vocab_dict[row[0]] = id\n embedding.append([float(i) for i in row[1:]])\n file.close()\n embedding.append([0] * len(embedding[0]))\n return vocab_dict, embedding\n\n\ndef get_vocab_id(word, vocab_dict):\n if vocab_dict.get(word) is None:\n return len(vocab_dict)\n else:\n return vocab_dict[word]\n\n\ndef pad_ids(id_list, vocab_dict, max_sequence_length):\n if len(id_list) >= max_sequence_length:\n return id_list[:max_sequence_length]\n else:\n return id_list + [len(vocab_dict)] * (max_sequence_length - len(id_list))\n\n\ndef pad_tokens(tokens, max_sequence_length):\n if len(tokens) >= max_sequence_length:\n return tokens[:max_sequence_length]\n else:\n pad_token = \"\".encode('utf-8')\n return tokens + [pad_token] * 
(max_sequence_length - len(tokens))\n\n\ndef document_to_tensor(document, vocab_dict, embedding, max_sequence_length, cove_encoder):\n tokens = [token.replace(\"``\", '\"').replace(\n \"''\", '\"') for token in nltk.word_tokenize(document)]\n length = [min(len(tokens), max_sequence_length)]\n tokens = pad_tokens(tokens, max_sequence_length)\n ids = pad_ids([get_vocab_id(token, vocab_dict)\n for token in tokens], vocab_dict, max_sequence_length)\n tensor = [embedding[id] for id in ids]\n if cove_encoder is not None:\n inputs = torch.autograd.Variable(\n torch.LongTensor(np.asarray(ids))).unsqueeze(0).cuda()\n length = torch.LongTensor(np.asarray(length)).cuda()\n document_tensor, document_cove = cove_encoder(inputs, length)\n\n if document_cove.shape[1] < 600:\n document_cove = torch.cat([document_cove, torch.autograd.Variable(\n torch.zeros(1, 600 - document_cove.shape[1], 600)).cuda()], 1)\n document_tensor = torch.cat(\n [document_tensor, document_cove], 2).squeeze(0).data.cpu().numpy()\n\n for i in range(max_sequence_length):\n if ids[i] == len(vocab_dict):\n document_tensor[i] = np.zeros(900)\n tensor = document_tensor\n #document = tf.transpose(tf.constant(\n # np.expand_dims(np.array(tensor), axis=0)), [0, 2, 1])\n document = tf.constant(np.expand_dims(np.array(tensor), axis=0))\n length = tf.constant(np.expand_dims(np.array(length), axis=0))\n return document, length\n\n\ndef input_fn(context, question, context_length, question_length, context_tokens):\n \"\"\"\n features = [OrderedDict([('context', context), ('question', question), ('context_length', context_length),\n ('question_length', question_length), ('context_tokens', tf.constant(context_tokens)), ('id', tf.constant(np.array(\"id\")))])]\n dtypes = OrderedDict([('context', tf.float32), ('question', tf.float32), ('context_length', tf.int64),\n ('question_length', tf.int64), ('context_tokens', tf.string), ('id', tf.string)])\n shapes = OrderedDict([('context', context.shape), ('question', question.shape), ('context_length', context_length.shape),\n ('question_length', question_length.shape), ('context_tokens', tf.constant(context_tokens).shape), ('id', tf.constant(np.array(\"id\")).shape)])\n train_data = tf.data.Dataset.from_generator(lambda: (feature for feature in features), dtypes, shapes)\n \"\"\"\n context_tokens = np.expand_dims(context_tokens, axis=0)\n features = {'context': context, 'question': question, 'context_length': context_length, 'question_length': question_length, 'context_tokens': tf.constant(context_tokens), 'id': tf.constant(np.array([\"id\"]))}\n train_data = tf.data.Dataset.from_tensors(features)\n iterator = train_data.make_one_shot_iterator()\n return iterator.get_next()\n\n\nif __name__ == '__main__':\n params = json.load(open('params.json'))\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n max_sequence_length = params['model']['max_sequence_length']\n parser = argparse.ArgumentParser()\n parser.add_argument('--glove_file')\n parser.add_argument('--use_cove', action='store_true')\n parser.add_argument('--model_dir', nargs='?', default='pretrained', type=str)\n args = parser.parse_args()\n\n glove_file = args.glove_file\n if glove_file is None:\n print(\"Glove file needed\")\n\n else:\n context = input(\"Context: \")\n question = input(\"Question: \")\n vocab_dict, embedding = load_glove(glove_file)\n\n cove_encoder = None\n if args.use_cove:\n cove_encoder = CoveEncoder(n_vocab=len(\n embedding), vectors=torch.FloatTensor(embedding), residual_embeddings=False)\n cove_encoder.cuda()\n\n context_tokens = 
[token.replace(\"``\", '\"').replace(\n \"''\", '\"') for token in nltk.word_tokenize(context)]\n context_embedding, context_length = document_to_tensor(\n context, vocab_dict, embedding, max_sequence_length, cove_encoder)\n question_embedding, question_length = document_to_tensor(\n question, vocab_dict, embedding, max_sequence_length, cove_encoder)\n dcn_estimator = tf.estimator.Estimator(\n model_fn=dcn_plus_model, params=params['model'], model_dir=args.model_dir)\n prediction = dcn_estimator.predict(input_fn=lambda: input_fn(\n context_embedding, question_embedding, context_length, question_length, context_tokens))\n prediction = list(prediction)[0]\n detokenizer = nltk.tokenize.moses.MosesDetokenizer()\n prediction = detokenizer.detokenize([token.decode(\n 'utf-8') for token in prediction['context_tokens'][prediction['start']:prediction['end']+1]], return_str=True)\n print(\"Answer: {}\".format(prediction))\n", "sub_path": "interactive.py", "file_name": "interactive.py", "file_ext": "py", "file_size_in_byte": 5820, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "nltk.word_tokenize", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 58, "usage_type": "attribute"}, {"api_name": "torch.LongTensor", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 64, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 75, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 90, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 91, "usage_type": "call"}, {"api_name": "tensorflow.data.Dataset.from_tensors", "line_number": 92, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 92, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 98, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 99, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 101, "usage_type": "call"}, {"api_name": "preprocessing.cove_encoder.MTLSTM", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 119, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 123, "usage_type": "call"}, {"api_name": "tensorflow.estimator.Estimator", "line_number": 128, "usage_type": "call"}, {"api_name": 
"tensorflow.estimator", "line_number": 128, "usage_type": "attribute"}, {"api_name": "model.dcn_plus_model", "line_number": 129, "usage_type": "name"}, {"api_name": "nltk.tokenize.moses.MosesDetokenizer", "line_number": 133, "usage_type": "call"}, {"api_name": "nltk.tokenize", "line_number": 133, "usage_type": "attribute"}]} +{"seq_id": "540917336", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('catalog', '0003_auto_20150502_1639'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='category',\n options={'ordering': ('position',), 'verbose_name': 'категория', 'verbose_name_plural': 'категории'},\n ),\n migrations.AlterModelOptions(\n name='tattoo',\n options={'ordering': ('position',), 'verbose_name': 'тату', 'verbose_name_plural': 'тату'},\n ),\n migrations.AddField(\n model_name='tattoo',\n name='is_main',\n field=models.BooleanField(verbose_name='для главной', default=False),\n ),\n migrations.AlterField(\n model_name='category',\n name='position',\n field=models.PositiveSmallIntegerField(verbose_name='cортировка', help_text='Индекс сортировки (по возрастанию).', default=0),\n ),\n migrations.AlterField(\n model_name='tattoo',\n name='position',\n field=models.PositiveSmallIntegerField(verbose_name='cортировка', help_text='Индекс сортировки (по возрастанию).', default=0),\n ),\n ]\n", "sub_path": "catalog/migrations/0004_auto_20150512_1251.py", "file_name": "0004_auto_20150512_1251.py", "file_ext": "py", "file_size_in_byte": 1404, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterModelOptions", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterModelOptions", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.PositiveSmallIntegerField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 32, "usage_type": "name"}, {"api_name": "django.db.models.PositiveSmallIntegerField", "line_number": 35, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "498527786", "text": "'''\nGiven an integer array nums and an integer k, return the k most frequent elements. 
You may return the answer in any order.\n'''\nfrom time import time\nfrom typing import List\n\n\nclass Solution:\n def topKFrequent(self, nums: List[int], k: int) -> List[int]:\n res, freq = [], {}\n for n in nums:\n freq[n] = 1 + freq.get(n, 0)\n\n for _ in range(k):\n # Get number corresponding to max count then delete\n num = max(freq, key=freq.get)\n res.append(num)\n del freq[num]\n\n return res\n\n def reference(self, nums: List[int], k: int) -> List[int]:\n count = {}\n freq = [[] for i in range(len(nums) + 1)]\n\n for n in nums:\n count[n] = 1 + count.get(n, 0)\n for n, c in count.items():\n freq[c].append(n)\n\n res = []\n for i in range(len(freq) - 1, 0, -1):\n for n in freq[i]:\n res.append(n)\n if len(res) == k:\n return res\n\n def quantify(self, test_cases, runs=50000):\n sol_start = time()\n for i in range(runs):\n for case in test_cases:\n if i == 0:\n print(self.topKFrequent(*case))\n else:\n self.topKFrequent(*case)\n print(f'Runtime for our solution: {time() - sol_start}\\n')\n\n ref_start = time()\n for i in range(0, runs):\n for case in test_cases:\n if i == 0:\n print(self.reference(*case))\n else:\n self.reference(*case)\n print(f'Runtime for reference: {time() - ref_start}')\n\n\nif __name__ == '__main__':\n test = Solution()\n test_cases = [([1, 1, 1, 2, 2, 3], 2), ([1], 1)]\n test.quantify(test_cases)\n", "sub_path": "Blind 75/01 - Arrays and Hashing/347-top-k-frequent-elements.py", "file_name": "347-top-k-frequent-elements.py", "file_ext": "py", "file_size_in_byte": 1802, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "typing.List", "line_number": 9, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 22, "usage_type": "name"}, {"api_name": "time.time", "line_number": 39, "usage_type": "call"}, {"api_name": "time.time", "line_number": 46, "usage_type": "call"}, {"api_name": "time.time", "line_number": 48, "usage_type": "call"}, {"api_name": "time.time", "line_number": 55, "usage_type": "call"}]}
+{"seq_id": "503743905", "text": "\"\"\"\r\nAssignment 4B - Machine Learning\r\nBy: David Walesby - 000732130\r\nPurpose: To implement a multilayer perceptron classifier\r\n\"\"\"\r\nfrom sklearn.neural_network import MLPClassifier\r\nimport csv\r\nimport numpy as np\r\nimport random\r\nfrom sklearn import tree\r\nfrom sklearn.preprocessing import normalize\r\n\r\n## Reads in the datafile and returns the arrays\r\ndef ReadFile(fileName):\r\n trainingData = []\r\n trainingLabels = []\r\n testingData = []\r\n testingLabels = []\r\n\r\n with open(fileName) as file:\r\n csv_reader = csv.reader(file , delimiter=\",\")\r\n line_count = 0\r\n for row in csv_reader:\r\n randomNumber = random.randint(1 , 101)\r\n if randomNumber > 25:\r\n testingData.append(row)\r\n else:\r\n trainingData.append(row)\r\n line_count += 1\r\n print(f'Processed {line_count} lines.')\r\n\r\n trainingData = np.array(trainingData, dtype=np.float32)\r\n testingData = np.array(testingData, dtype=np.float32)\r\n trainingLabels = trainingData[:,-1]\r\n testingLabels = testingData[:,-1]\r\n trainingData = np.delete(trainingData,-1, axis=1)\r\n testingData = np.delete(testingData,-1, axis=1)\r\n return trainingData, trainingLabels, testingData, testingLabels\r\n\r\n## Runs the data for classification through a decision tree classifier and a Multi Layer Perceptron classifier and displays the results\r\ndef RunTests(normalizedTrainingData, trainingLabels, normalizedTestingData, testingLabels, fileName):\r\n clf = tree.DecisionTreeClassifier()\r\n clf = clf.fit(normalizedTrainingData, trainingLabels)\r\n decisionPrediction = clf.predict(normalizedTestingData)\r\n decisionCorrect = (decisionPrediction == testingLabels).sum()\r\n decisionTreeAccuracy = decisionCorrect/len(decisionPrediction)*100\r\n\r\n mlpPerceptron = MLPClassifier(hidden_layer_sizes= 15,max_iter=250, learning_rate_init=0.17)\r\n mlpPerceptron.fit(normalizedTrainingData,trainingLabels)\r\n mlpPrediction = mlpPerceptron.predict(normalizedTestingData)\r\n mlpCorrect = (mlpPrediction == testingLabels).sum()\r\n mlpAccuracy = mlpCorrect/len(mlpPrediction)*100\r\n print()\r\n print(f'{fileName}')\r\n print(\"------------------------------------------------------------------------\")\r\n print(f'Accuracy Tree: {round(decisionTreeAccuracy,1)}%')\r\n print(f'Accuracy MLP: {round(mlpAccuracy,1)}%')\r\n print(f'{mlpPerceptron.get_params()}')\r\n print()\r\n\r\nclf = tree.DecisionTreeClassifier()\r\n\r\n## Store file information\r\ntrainingData1, trainingLabels1, testingData1, testingLabels1 = ReadFile(\"000732130_1.csv\")\r\ntrainingData2, trainingLabels2, testingData2, testingLabels2 = ReadFile(\"000732130_2.csv\")\r\ntrainingData3, trainingLabels3, testingData3, testingLabels3 = ReadFile(\"000732130_3.csv\")\r\ntrainingData4, trainingLabels4, testingData4, testingLabels4 = ReadFile(\"000732130_4.csv\")\r\ntrainingData5, trainingLabels5, testingData5, testingLabels5 = ReadFile(\"dexter.csv\")\r\n\r\n## Normalize the training data\r\nnormalizedTrainingData1 = normalize(trainingData1, axis=0, norm='max')\r\nnormalizedTrainingData2 = normalize(trainingData2, axis=0, norm='max')\r\nnormalizedTrainingData3 = normalize(trainingData3, axis=0, norm='max')\r\nnormalizedTrainingData4 = normalize(trainingData4, axis=0, norm='max')\r\nnormalizedTrainingData5 = normalize(trainingData5, axis=0, norm='max')\r\n\r\n## Normalize the testing data\r\nnormalizedTestingData1 = normalize(testingData1, axis=0, norm='max')\r\nnormalizedTestingData2 = normalize(testingData2, axis=0, norm='max')\r\nnormalizedTestingData3 = normalize(testingData3, axis=0, norm='max')\r\nnormalizedTestingData4 = normalize(testingData4, axis=0, norm='max')\r\nnormalizedTestingData5 = normalize(testingData5, axis=0, norm='max')\r\n\r\n## Run tests\r\nRunTests(normalizedTrainingData1, trainingLabels1, normalizedTestingData1, testingLabels1,\"000732130_1.csv\")\r\nRunTests(normalizedTrainingData2, trainingLabels2, normalizedTestingData2, testingLabels2,\"000732130_2.csv\")\r\nRunTests(normalizedTrainingData3, trainingLabels3, normalizedTestingData3, testingLabels3,\"000732130_3.csv\")\r\nRunTests(normalizedTrainingData4, trainingLabels4, normalizedTestingData4, testingLabels4,\"000732130_4.csv\")\r\nRunTests(normalizedTrainingData5, trainingLabels5, normalizedTestingData5, testingLabels5,\"dexter.csv\")\r\n", "sub_path": "assignment4b.py", "file_name": "assignment4b.py", "file_ext": "py", "file_size_in_byte": 4303, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "csv.reader", "line_number": 21, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 33, "usage_type": "attribute"}, {"api_name": "numpy.delete", 
"line_number": 36, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 37, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 42, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 42, "usage_type": "name"}, {"api_name": "sklearn.neural_network.MLPClassifier", "line_number": 48, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 61, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 61, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.normalize", "line_number": 71, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.normalize", "line_number": 72, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.normalize", "line_number": 73, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.normalize", "line_number": 74, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.normalize", "line_number": 75, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.normalize", "line_number": 78, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.normalize", "line_number": 79, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.normalize", "line_number": 80, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.normalize", "line_number": 81, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.normalize", "line_number": 82, "usage_type": "call"}]} +{"seq_id": "627997238", "text": "import math, collections\nfrom collections import defaultdict\n\nclass BackoffModel:\n\n def __init__(self, corpus):\n \"\"\"Initialize your data structures in the constructor.\"\"\"\n self.unigramCounts = defaultdict(lambda: 0)\n self.table = defaultdict(lambda: defaultdict(int))\n self.words = set([])\n self.total = 0\n self.train(corpus)\n\n def train(self, corpus):\n \"\"\" Takes a corpus and trains your language model.\n Compute any counts or other corpus statistics in this function.\n \"\"\"\n for sentence in corpus.corpus:\n prevWord = None\n for datum in sentence.data:\n token = datum.word\n self.table[prevWord][token] = self.table[prevWord][token] + 1\n self.unigramCounts[token] = self.unigramCounts[token] + 1\n self.total += 1\n self.words.add(token)\n prevWord = token\n\n def score(self, sentence):\n \"\"\" Takes a list of strings as argument and returns the log-probability of the\n sentence using your language model. 
Use whatever data you computed in train() here.\n \"\"\"\n score = 0.0\n prevWord = None\n vocab = len(self.words)\n for token in sentence:\n occurances = self.table[prevWord][token]\n countPrev = self.unigramCounts[prevWord]\n\n probability = float(occurances) / (float(countPrev) + vocab)\n\n #Test results of bigram\n if probability > 0:\n score += math.log(probability)\n else: #Back off to unigram\n count = self.unigramCounts[token]\n if count > 0:\n score += math.log(count)\n score -= math.log(self.total)\n\n prevWord = token\n return abs(score)\n", "sub_path": "hw1/BackoffModel.py", "file_name": "BackoffModel.py", "file_ext": "py", "file_size_in_byte": 1621, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "collections.defaultdict", "line_number": 8, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 9, "usage_type": "call"}, {"api_name": "math.log", "line_number": 43, "usage_type": "call"}, {"api_name": "math.log", "line_number": 47, "usage_type": "call"}, {"api_name": "math.log", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "263269663", "text": "'''\nROC plot of a dataset\n'''\nimport argparse\nimport pickle\nimport numpy as np\nfrom scipy import interp\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import roc_curve, auc\n\ndef roc_process(roc_list, mean_fpr):\n '''\n Average the data over folds\n '''\n tprs = []\n aucs = [] \n i = 0\n for roc in roc_list:\n fpr = roc['fpr']\n tpr = roc['tpr']\n auc_value = roc['auc']\n tprs.append(interp(mean_fpr, fpr, tpr))\n aucs.append(auc_value)\n tprs[-1][0] = 0.0\n #plt.plot(fpr, tpr, lw=1, alpha=0.3, label='ROC fold %d (AUC = %0.2f)' % (i, auc))\n i+=1\n mean_tpr = np.mean(tprs, axis=0)\n mean_tpr[-1] = 1.0\n mean_auc = auc(mean_fpr, mean_tpr)\n return mean_tpr, mean_auc \n\n\nif __name__==\"__main__\":\n result_dir_0 = './results/roc_erneg_use_structual_features_True_p_value_0.05.pickle' \n result_dir_1 = './results/roc_erpos_use_structual_features_True_p_value_0.05.pickle'\n \n result_file_0 = open(result_dir_0, 'rb')\n roc_list_0 = pickle.load(result_file_0) \n result_file_1 = open(result_dir_1, 'rb')\n roc_list_1 = pickle.load(result_file_1)\n \n mean_fpr = np.linspace(0, 1, 100)\n mean_tpr_0, mean_auc_0 = roc_process(roc_list_0, mean_fpr)\n mean_tpr_1, mean_auc_1 = roc_process(roc_list_1, mean_fpr)\n\n # plot roc curve of random\n\n plt.plot(mean_fpr, mean_tpr_1, dashes = [6, 1, 1, 1, 1, 1], color='g', label='ER+', lw=2, alpha=.8)\n plt.plot(mean_fpr, mean_tpr_0, dashes = [6, 1, 1, 1], color='b', label='ER-', lw=2, alpha=.8)\n plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Random', alpha=.8)\n print('AUC for ER+:', mean_auc_1)\n print('AUC for ER-:', mean_auc_0)\n\n plt.xlim([-0.05, 1.05])\n plt.ylim([-0.05, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic')\n plt.legend(loc=\"lower right\")\n plt.show()", "sub_path": "p_value_classification/roc_machailidou.py", "file_name": "roc_machailidou.py", "file_ext": "py", "file_size_in_byte": 1947, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "scipy.interp", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 27, "usage_type": "call"}, {"api_name": "sklearn.metrics.auc", "line_number": 29, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 38, "usage_type": 
"call"}, {"api_name": "pickle.load", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}]} +{"seq_id": "421082667", "text": "import django\ndjango.setup()\nfrom core.bo.curso import get_cursos\nfrom core.bo.sala import get_salas\nfrom django.test import TestCase\nfrom core.tests.povoar_testes import criar_dados, remover_dados\nfrom core.dao.centro_dao import get_centro_by_id, get_centros\nfrom core.dao.componente_dao import get_componentes_by_depto\nfrom core.dao.departamento_dao import get_depto_by_id, get_departamentos\n\n\nclass DAOTests(TestCase):\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n print('\\nDAOTests')\n criar_dados()\n\n @classmethod\n def tearDownClass(cls):\n super().tearDownClass()\n remover_dados()\n\n def test_get_centros(self):\n centros = get_centros()\n\n self.assertIsNotNone(centros, 'Testando centros')\n self.assertTrue(len(centros) > 0, 'Testando centros')\n\n def test_get_ceres(self):\n id_centro = 9999\n codigo = 9999\n sigla = 'CTESTE'\n nome = 'Centro de Teste'\n endereco = 'Rua Joaquim Gregório, Penedo, Caicó - RN'\n site = 'https://www.ceres.ufrn.br/'\n\n centro = get_centro_by_id(9999)\n\n self.assertEqual(id_centro, centro.id_unidade, 'Testando Id Unidade')\n self.assertEqual(codigo, centro.codigo, 'Testando Código')\n self.assertEqual(sigla, centro.sigla, 'Testando Sigla')\n self.assertEqual(nome, centro.nome, 'Testando Nome')\n self.assertEqual(endereco, centro.endereco, 'Testando Endereço')\n self.assertEqual(site, centro.site, 'Testando Site')\n\n centro = get_centro_by_id(6666)\n self.assertIsNone(centro)\n\n def test_get_centro(self):\n id_centro = 9999\n codigo = 9999\n sigla = 'CTESTE'\n nome = 'Centro de Teste'\n endereco = 'Rua Joaquim Gregório, Penedo, Caicó - RN'\n site = 'https://www.ceres.ufrn.br/'\n\n centro = get_centro_by_id(id_centro)\n\n self.assertEqual(id_centro, centro.id_unidade, 'Testando Id Unidade')\n 
self.assertEqual(codigo, centro.codigo, 'Testando Código')\n self.assertEqual(sigla, centro.sigla, 'Testando Sigla')\n self.assertEqual(nome, centro.nome, 'Testando Nome')\n self.assertEqual(endereco, centro.endereco, 'Testando Endereço')\n self.assertEqual(site, centro.site, 'Testando Site')\n\n def test_get_deptos_centro(self):\n deptos = get_departamentos()\n\n self.assertIsNotNone(deptos, 'Testando departamentos dos centros')\n self.assertTrue(len(deptos) > 0, 'Testando qtd departamentos')\n\n def test_get_componentes_by_depto(self):\n depto = get_depto_by_id(9998)\n ccs = get_componentes_by_depto(depto)\n\n self.assertEqual(4, len(ccs), 'Testando componentes')\n\n def test_get_salas(self):\n salas = get_salas()\n\n self.assertIsNotNone(salas, 'Testando salas')\n self.assertTrue(len(salas) > 0, 'Testando salas')\n\n def test_get_cursos(self):\n cursos = get_cursos()\n\n self.assertIsNotNone(cursos, 'Testando cursos')\n self.assertTrue(len(cursos) > 0, 'Testando cursos')\n", "sub_path": "core/tests/test_dao.py", "file_name": "test_dao.py", "file_ext": "py", "file_size_in_byte": 3063, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "django.setup", "line_number": 2, "usage_type": "call"}, {"api_name": "django.test.TestCase", "line_number": 12, "usage_type": "name"}, {"api_name": "core.tests.povoar_testes.criar_dados", "line_number": 18, "usage_type": "call"}, {"api_name": "core.tests.povoar_testes.remover_dados", "line_number": 23, "usage_type": "call"}, {"api_name": "core.dao.centro_dao.get_centros", "line_number": 26, "usage_type": "call"}, {"api_name": "core.dao.centro_dao.get_centro_by_id", "line_number": 39, "usage_type": "call"}, {"api_name": "core.dao.centro_dao.get_centro_by_id", "line_number": 48, "usage_type": "call"}, {"api_name": "core.dao.centro_dao.get_centro_by_id", "line_number": 59, "usage_type": "call"}, {"api_name": "core.dao.departamento_dao.get_departamentos", "line_number": 69, "usage_type": "call"}, {"api_name": "core.dao.departamento_dao.get_depto_by_id", "line_number": 75, "usage_type": "call"}, {"api_name": "core.dao.componente_dao.get_componentes_by_depto", "line_number": 76, "usage_type": "call"}, {"api_name": "core.bo.sala.get_salas", "line_number": 81, "usage_type": "call"}, {"api_name": "core.bo.curso.get_cursos", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "87974004", "text": "\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndata = pd.read_csv('LinearRegression5_Data.csv')\nprint(data)\n\nprint(data.shape) #(6,2)\n\nX = data.iloc[ : , 0:1].values # [ rows , cols ]\ny = data.iloc[:, 1].values\n\nprint(\"X.shape = \", X.shape , \"\\n X=\\n\" , X)\n\nprint(\"y.shape = \", y.shape , \"\\n y=\" , y )\n\n\nfrom sklearn.linear_model import LinearRegression\n\nlin = LinearRegression()\nlin.fit(X, y)\ny_dash = lin.predict(X)\n\nplt.scatter(X, y, color='blue')\nplt.plot(X, y_dash , color='red')\nplt.title('Linear Regression')\nplt.xlabel('Engine Temperature')\nplt.ylabel('Engine Pressure')\n\nplt.show()\n\n# =========================================================\n\nfrom sklearn.preprocessing import PolynomialFeatures\n\npoly = PolynomialFeatures(degree=4)\nX_poly = poly.fit_transform(X)\n\n#poly.fit(X_poly, y)\nlin2 = LinearRegression()\nlin2.fit(X_poly, y)\n\n\nplt.scatter(X, y, color='blue')\n\nplt.plot(X, lin2.predict(poly.fit_transform(X)), color='red')\nplt.title('Polynomial Regression')\nplt.xlabel('Engine Temperature')\nplt.ylabel('Engine 
Pressure')\n\nplt.show()\n\n# Predicting a new result with Linear Regression\nprint( \"LinearRegresion: \", lin.predict([[110.0]]) )\n\n# Predicting a new result with Polynomial Regression\nprint( \"PolynomialRegresion: \",lin2.predict(poly.fit_transform([[110.0]])) )\n", "sub_path": "5. machine learning/2. Supervised Machine Learning/2. Polynomial/1. polynomial features.py", "file_name": "1. polynomial features.py", "file_ext": "py", "file_size_in_byte": 1335, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "pandas.read_csv", "line_number": 6, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.PolynomialFeatures", "line_number": 37, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}]} +{"seq_id": "362764089", "text": "\"\"\":\n FunFacts _urls.py\n\"\"\"\n\n\nfrom django.conf.urls import url,include\nfrom . 
import views\n\nurlpatterns = [\n #url(r'^admin/', admin.site.urls),\n url(r'^$', views.index),\n url(r'^funfacts_process$', views.funfacts_process),\n url(r'^funfacts$', views.funfacts),\n]\n", "sub_path": "funfacts/apps/funfacts_app/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 281, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "463921828", "text": "import my_log\nimport loader_from_file as lfl\nimport re\nimport config\nimport parser_command.command as p\n\nLOG = my_log.get_logger('update')\n\n\ndef update(words):\n num = None\n download = False\n\n company, privileged = p.name_and_priviledget(words)\n\n count_words = words.__len__()\n if count_words > 1:\n if re.compile(r'[0-9]+').match(words[1]):\n num = int(words[1])\n elif words[count_words - 1] in config.CMD_DOWNLOAD_FILES:\n company = ' '.join(words[1:count_words - 1])\n download = True\n else:\n company = ' '.join(words[1:count_words])\n\n LOG.info(\"Update %s files and %s download\" % (str(num) if num is not None else company, str(download)))\n if company is None or len(company) == 0:\n lfl.load_stocks(num, download)\n else:\n lfl.update_stock_from_file(company, download, privileged)\n\n\ndef update_metainfo():\n lfl.load_all()\n return config.RSP_UPDATE_METAINFO\n", "sub_path": "bot/updater.py", "file_name": "updater.py", "file_ext": "py", "file_size_in_byte": 963, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "my_log.get_logger", "line_number": 7, "usage_type": "call"}, {"api_name": "parser_command.command.name_and_priviledget", "line_number": 14, "usage_type": "call"}, {"api_name": "parser_command.command", "line_number": 14, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 18, "usage_type": "call"}, {"api_name": "config.CMD_DOWNLOAD_FILES", "line_number": 20, "usage_type": "attribute"}, {"api_name": "loader_from_file.load_stocks", "line_number": 28, "usage_type": "call"}, {"api_name": "loader_from_file.update_stock_from_file", "line_number": 30, "usage_type": "call"}, {"api_name": "loader_from_file.load_all", "line_number": 34, "usage_type": "call"}, {"api_name": "config.RSP_UPDATE_METAINFO", "line_number": 35, "usage_type": "attribute"}]} +{"seq_id": "131936425", "text": "#\n# Copyright (c) 2016, Novartis Institutes for BioMedical Research Inc.\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met: \n#\n# * Redistributions of source code must retain the above copyright \n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following \n# disclaimer in the documentation and/or other materials provided \n# with the distribution.\n# * Neither the name of Novartis Institutes for BioMedical Research Inc. 
\n# nor the names of its contributors may be used to endorse or promote \n# products derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# Created by Nadine Schneider, June 2016\n\n\nimport numpy as np\nimport pandas as pd\nimport copy\nimport re\nfrom rdkit.Chem import PandasTools\nfrom IPython.display import SVG\n\n# NOTE: the literal HTML/CSS/SVG tag strings in this module were lost to markup stripping;\n# the tag strings below are a minimal plausible reconstruction, not the original markup\n# generate an HTML table of the svg images to visualize them nicely in the Jupyter notebook \nPandasTools.RenderImagesInAllDataFrames(images=True)\ndef drawSVGsToHTMLGrid(svgs, cssTableName='default', tableHeader='', namesSVGs=[], size=(150,150), numColumns=4, numRowsShown=2, noHeader=False):\n rows=[]\n names=copy.deepcopy(namesSVGs)\n rows = [SVG(i).data if i.startswith('<svg') else i for i in svgs]\n d=int(len(rows)/numColumns)\n x=len(rows)%numColumns\n if x > 0:\n rows+=['']*(numColumns-x)\n d+=1\n if len(names)>0:\n names+=['']*(numColumns-x)\n rows=np.array(rows).reshape(d,numColumns)\n finalRows=[]\n if len(names)>0:\n names = np.array(names).reshape(d,numColumns)\n for r,n in zip(rows,names):\n finalRows.append(r)\n finalRows.append(n)\n d*=2\n else:\n finalRows=rows\n\n headerRemove = int(max(numColumns,d))\n df=pd.DataFrame(finalRows)\n\n
style = '<style>\ntable.'+cssTableName+' { border-collapse: collapse; }\ntable.'+cssTableName+' td { text-align: center; padding: 5px; }\n</style>\n'\n if not noHeader:\n style += '<div class=\"tableheader\">'+str(tableHeader)+'</div>\n'\n style += '<div style=\"overflow-y: auto; height: '+str(size[1]*numRowsShown)+'px;\">\n'\n dfhtml=style+df.to_html()+'\n</div>\n'\n dfhtml=dfhtml.replace('class=\"dataframe\"','class=\"'+cssTableName+'\"')\n dfhtml=dfhtml.replace('<th></th>','')\n for i in range(0,headerRemove):\n dfhtml=dfhtml.replace('<th>'+str(i)+'</th>','')\n return dfhtml\n\n
# build an svg grid image to print\ndef SvgsToGrid(svgs, labels, svgsPerRow=4,molSize=(250,150),fontSize=12):\n \n matcher = re.compile(r'^(<.*>\n)(<rect.*>\n)(.*)</svg>',re.DOTALL) \n hdr='' \n ftr='</svg>' \n rect='' \n nRows = len(svgs)//svgsPerRow \n if len(svgs)%svgsPerRow : nRows+=1 \n blocks = ['']*(nRows*svgsPerRow)\n labelSizeDist = fontSize*5\n fullSize=(svgsPerRow*(molSize[0]+molSize[0]/10.0),nRows*(molSize[1]+labelSizeDist))\n print(fullSize)\n\n count=0\n for svg,name in zip(svgs,labels):\n h,r,b = matcher.match(svg).groups()\n if not hdr: \n hdr = h.replace(\"width='\"+str(molSize[0])+\"px'\",\"width='%dpx'\"%fullSize[0])\n hdr = hdr.replace(\"height='\"+str(molSize[1])+\"px'\",\"height='%dpx'\"%fullSize[1])\n if not rect: \n rect = r\n legend = '<g font-family=\"sans-serif\" font-size=\"'+str(fontSize)+'px\" text-anchor=\"middle\">\n'\n legend += '<text x=\"'+str(molSize[0]/2)+'\" y=\"'+str(molSize[1]+fontSize*2)+'\">'+name.split('|')[0]+'</text>\n'\n if len(name.split('|')) > 1:\n legend += '<text x=\"'+str(molSize[0]/2)+'\" y=\"'+str(molSize[1]+fontSize*4)+'\">'+name.split('|')[1]+'</text>\n'\n legend += '</g>\n'\n blocks[count] = b + legend\n count+=1\n\n for i,elem in enumerate(blocks): \n row = i//svgsPerRow \n col = i%svgsPerRow \n elem = rect+elem \n blocks[i] = '<g transform=\"translate(%d,%d)\">%s</g>'%(col*(molSize[0]+molSize[0]/10.0),row*(molSize[1]+labelSizeDist),elem) \n res = hdr + '\n'.join(blocks)+ftr \n return res \n
", "sub_path": "ChemTopicModel/utilsDrawing.py", "file_name": "utilsDrawing.py", "file_ext": "py", "file_size_in_byte": 5777, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "rdkit.Chem.PandasTools.RenderImagesInAllDataFrames", "line_number": 42, "usage_type": "call"}, {"api_name": "rdkit.Chem.PandasTools", "line_number": 42, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 45, "usage_type": "call"}, {"api_name": "IPython.display.SVG", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 57, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 66, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 92, "usage_type": "call"}, {"api_name": "re.DOTALL", "line_number": 92, "usage_type": "attribute"}]} +{"seq_id": "461593840", "text": "import csv\nimport matplotlib.pyplot as plt\n\n\ncsvread = csv.reader(open('final_author_coauthor.csv','r'))\ncsvread2 = csv.reader(open('final_author_percitation_bk.csv','r'))\n\npaperlist = {}\npaperlist2 = {}\n\nfor row in csvread: \n\tpaperlist[int(row[0])] = int(row[2])\n\tpaperlist[int(row[1])] = int(row[2])\n\nfor row in csvread2: \n\tpaperlist2[int(row[0])] = int(row[2])\n\nkey1 = paperlist.keys()\nkey2 = paperlist2.keys()\n\nprint(len(key1))\nprint(len(key2))\ndiff = list(set(key2)-set(key1))\n\nprint (len(diff))\nplot = []\nfor key in diff:\n\tplot.append(key)\n\nplt.plot(plot,'b.')\n# plt.hist(plot,bins=6)\nplt.show()\n", "sub_path": "reduced/datasetcomparer.py", "file_name": "datasetcomparer.py", "file_ext": "py", "file_size_in_byte": 602, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "csv.reader", "line_number": 5, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 6, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", 
"line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}]} +{"seq_id": "53654862", "text": "import os\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nmatplotlib.use('Agg')\n\nimport torch\nfrom torch.nn import functional as F\nfrom torch import tensor\n\nfrom nn.models import Encoder, Decoder, Classifier\nfrom plots.scatter_plot_test import plot\nimport constants_cmap\nfrom datasets import datasets\nimport numpy as np\nfrom sklearn.decomposition import PCA\n\nimport matplotlib.cm as cm\nimport matplotlib.colors as ml_colors\nfrom matplotlib.lines import Line2D\nfrom sklearn.neighbors import KNeighborsClassifier\n\nfrom sklearn.model_selection import train_test_split\n\ndef knn(X_train,y_train, X_test, y_test):\n neigh = KNeighborsClassifier(n_neighbors=10)\n neigh.fit(X_train, y_train)\n score=neigh.score(X_test, y_test)\n print(score)\n\n return score\n\n\ndef plot_bu(model, test_loader, device, suffix, path_to_save, dataset_names, colormap, bg_color):\n zs=tensor([])\n mus=tensor([])\n logvars=tensor([])\n labels=tensor([]).long()\n\n for batch_idx, (data, label) in enumerate(test_loader):\n data = data.to(device)\n z, mu, logvar, _ = model(data)\n zs=torch.cat((zs, z), 0)\n mus=torch.cat((mus, mu), 0)\n logvars=torch.cat((logvars, logvar), 0)\n labels=torch.cat((labels, label), 0)\n\n # xs,ys=list(zip(*zs.cpu().numpy()))\n zs=zs.cpu().numpy()\n labels=labels.cpu().numpy()\n\n # np.save(os.path.join(path_to_save, \"latent_features{}.npy\".format(suffix)), np.hstack([zs, labels.reshape(-1,1), [[dataset_names[a]] for a in labels]]))\n X_pca = zs # PCA(n_components=2).fit_transform(zs)\n\n limit=10\n labels=np.array([labels[i] for i, a in enumerate(zs) if np.abs(a[0]) \" + ETC_HOSTNAME )\n\nif confHostname:\n with open(ETC_HOSTNAME, 'w') as f:\n f.write(confHostname + '\\n')\n f.close()\n\n with open(ETC_HOSTS, 'r') as fp:\n lines = fp.read().split(\"\\n\")\n fp.close()\n\n with open(ETC_HOSTS, 'w') as fp: \n for i in lines:\n if '127.0.1.1' in i and DOMAIN_NAME in i:\n #print ('127.0.1.1\\t' + confHostname)\n fp.write ('127.0.1.1\\t' + confHostname+'\\n')\n else:\n if i:\n #print (i) \n fp.write (i+'\\n') \n fp.close()\n #os.system('sudo shutdown -r now')\n os.system('hostnamectl set-hostname ' + confHostname)\n", "sub_path": "hostnamer.py", "file_name": "hostnamer.py", "file_ext": "py", "file_size_in_byte": 1517, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "os.path.dirname", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 11, "usage_type": "attribute"}, {"api_name": "uuid.getnode", "line_number": 13, "usage_type": "call"}, {"api_name": "json.load", "line_number": 17, "usage_type": "call"}, {"api_name": "os.uname", "line_number": 21, "usage_type": "call"}, {"api_name": "os.system", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "270395771", "text": "from ortools.linear_solver import pywraplp\nimport networkx as nx\nfrom shapely.geometry import Point, LineString, MultiLineString\nimport os\nimport geojson\n\n\nclass RouteOptimizer():\n def __init__(self, trail_network, mindist = 0, maxdist = 100):\n \"\"\"\n This is a mixed-integer linear program. 
It will maximize distance\n such that each node is gone through symetrically from either side\n \"\"\"\n # Make Path object a more callable object -- Fix all this\n self.trail_network = trail_network\n self.mindist = mindist\n self.maxdist = maxdist\n self.variables = {}\n self.path_groups = {}\n self.group_vars = {}\n self.group_list = []\n self.starting_trails = {}\n self.constraints = {}\n self.solver = None\n self.objective = None\n self.results = None\n self.node_variables = {}\n self.edge_limit = {}\n \n\n def set_trip_length(self, mindist, maxdist):\n self.mindist = mindist\n self.maxdist = maxdist\n self.set_distance_constraint()\n \n\n def setup_solver(self):\n self.solver = pywraplp.Solver('Backpack Trip Planner',\n pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)\n \n self.objective = self.solver.Objective()\n self.objective.SetMaximization()\n \n\n def setup_variables(self):\n \"\"\"\n Each path is setup as an integer variable. It can either be 0 or 1.\n Paths can go from Origin_to_Destination, or Destnation_to_Origin\n \"\"\"\n \n self.set_distance_constraint()\n\n start = self.constraints[\"start_node\"] = self.solver.Constraint(0, 1) \n for path in self.trail_network.edges(data=True):\n pathwaycons = self.constraints[path[2][\"name\"]] = self.solver.Constraint(0, 1)\n pathd = path[2][\"length\"]\n constraint = self.constraints[\"Trip Distance\"]\n forward = (path[0],path[1], path[2][\"name\"])\n reverse = (path[1],path[0], path[2][\"name\"])\n \n # Add the node variables\n if path[0] not in self.node_variables:\n node1 = self.node_variables[path[0]] = self.solver.IntVar(0,1,\"node_var\"+str(path[0]))\n start.SetCoefficient(node1, 1)\n \n if path[1] not in self.node_variables:\n node2 = self.node_variables[path[1]] = self.solver.IntVar(0,1,\"node_var\"+str(path[1]))\n start.SetCoefficient(node2, 1)\n \n \n \n #Had previously set values at 2, not sure why?\n self.variables[forward] = self.solver.IntVar(0, 1, \"forward_\"+str(forward))\n self.variables[reverse] = self.solver.IntVar(0, 1, \"reverse_\"+str(reverse))\n \n # Add constraints so a pathway can go either forward or backward\n pathwaycons.SetCoefficient(self.variables[forward], 1)\n pathwaycons.SetCoefficient(self.variables[reverse], 1)\n\n # Add distances to the total distance constraint\n constraint.SetCoefficient(self.variables[forward], pathd)\n constraint.SetCoefficient(self.variables[reverse], pathd)\n \n # Add distances to objective function\n self.objective.SetCoefficient(self.variables[forward], pathd)\n self.objective.SetCoefficient(self.variables[reverse], pathd)\n \n\n def set_node_constraints(self):\n \"\"\"\n Each Pathway represents leaving a node or joining a node.\n All nodes must stay at 0, otherwise it is impossible to return to\n your origin\n \"\"\"\n \n # Have each node be a variable (Start Node) <-- Done: X\n # Constraint: Only have 1 start-node\n # Node Coefficient: 1 for Node Variable\n #Pathway Constraints Below can be 0 or 1\n # Start constraint prevents a -1\n # Pathway in single direction prevents doubling back\n \n if not self.variables:\n raise Exception(\"Pathway variables need to be setup first\")\n\n for pathway in self.variables:\n intvar = self.variables[pathway]\n\n \n if pathway[0] not in self.constraints:\n self.constraints[pathway[0]] = self.solver.Constraint(0, 1)\n edge1 = self.edge_limit[pathway[0]] = self.solver.Constraint(0,2)\n \n \n if pathway[1] not in self.constraints:\n self.constraints[pathway[1]] = self.solver.Constraint(0, 1)\n edge2 = self.edge_limit[pathway[1]] = 
self.solver.Constraint(0,2)\n \n node1 = self.constraints[pathway[0]]\n node2 = self.constraints[pathway[1]]\n edge1 = self.edge_limit[pathway[0]]\n edge2 = self.edge_limit[pathway[1]]\n \n \n node1.SetCoefficient(intvar, 1)\n node2.SetCoefficient(intvar, -1)\n edge1.SetCoefficient(intvar, 1)\n edge2.SetCoefficient(intvar, 1)\n \n # Allow start_condition to add a +1\n node1.SetCoefficient(self.node_variables[pathway[0]],1)\n node2.SetCoefficient(self.node_variables[pathway[1]],1)\n \n\n def set_distance_constraint(self):\n if \"Distance\" not in self.constraints:\n self.constraints[\"Trip Distance\"] = self.solver.Constraint(self.mindist, self.maxdist)\n else:\n self.constraints[\"Trip Distance\"].SetBounds(self.mindist, self.maxdist)\n \n \n def establish_groups(self):\n \"\"\"\n Create list that keeps track of which group \n (connnected component) each node belongs to\n \"\"\"\n d = list(self.trail_network.subgraph(c) for c in nx.connected_components(self.trail_network))\n for i, group in enumerate(d):\n for node in group:\n self.path_groups[node] = i\n self.group_list.append(i)\n \n ''' \n to return more than one trail, we could adjust the number of unique_starts, but we need to\n figure out what the solver is doing exactly and how to best optimize it\n '''\n def set_grouping_constraint(self, unique_starts = 1): \n \"\"\"\n A Constraint that allows only a number of networks equal to [unique_starts] chosen\n in a given area\n \"\"\"\n if not self.path_groups:\n self.establish_groups()\n \n grp_constraint = self.constraints[\"Trail Groups\"] = self.solver.Constraint(0, unique_starts)\n for group in self.group_list:\n grp_id = self.group_vars[group] = self.solver.IntVar(0,1,str(group))\n grp_constraint.SetCoefficient(grp_id, 1)\n \n for path_key in self.variables:\n \"\"\"\n Allows a path to be selected if it falls in the same group as the\n chosen hiking group\n \"\"\"\n grp_id = self.path_groups[path_key[0]]\n identifier = \"constraint_%s\" % str(grp_id)\n \n cons = self.group_vars[identifier] = self.solver.Constraint(0,self.solver.infinity())\n path_var = self.variables[path_key]\n grp_var = self.group_vars[grp_id]\n \n cons.SetCoefficient(path_var,-1)\n cons.SetCoefficient(grp_var, 1)\n \n \n def setup_lp(self):\n self.setup_solver()\n self.setup_variables()\n self.set_node_constraints()\n \n\n def solve(self):\n result_status = self.solver.Solve()\n return result_status\n \n\n def get_results(self):\n results = []\n print(\"Total Trip Length: %s km\" % self.objective.Value())\n for key in self.variables:\n intvar = self.variables[key]\n if intvar.solution_value() > 0:\n results.append(key)\n \n self.results = results\n return results\n\n\n def save_geojson(self,path_object):\n if not self.results:\n self.get_results()\n \n results = self.results\n \n lines = []\n for path_name in results:\n pointlist = []\n path = path_object.get(path_name).points\n if path.type == 'LineString':\n points = path.coords\n else:\n points = path[0].coords\n \n for coord in points:\n pointlist.append(Point([coord[0], coord[1]]))\n lines.append(LineString(pointlist))\n \n geom_in_geojson = geojson.Feature(geometry=MultiLineString(lines), properties={})\n return geojson.dumps(geom_in_geojson)\n \n\n\n \n \n", "sub_path": "app/app/tripopt/tripopt.py", "file_name": "tripopt.py", "file_ext": "py", "file_size_in_byte": 8798, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "ortools.linear_solver.pywraplp.Solver", "line_number": 38, 
"usage_type": "call"}, {"api_name": "ortools.linear_solver.pywraplp", "line_number": 38, "usage_type": "name"}, {"api_name": "ortools.linear_solver.pywraplp.Solver", "line_number": 39, "usage_type": "attribute"}, {"api_name": "ortools.linear_solver.pywraplp", "line_number": 39, "usage_type": "name"}, {"api_name": "networkx.connected_components", "line_number": 147, "usage_type": "call"}, {"api_name": "shapely.geometry.Point", "line_number": 225, "usage_type": "call"}, {"api_name": "shapely.geometry.LineString", "line_number": 226, "usage_type": "call"}, {"api_name": "geojson.Feature", "line_number": 228, "usage_type": "call"}, {"api_name": "shapely.geometry.MultiLineString", "line_number": 228, "usage_type": "call"}, {"api_name": "geojson.dumps", "line_number": 229, "usage_type": "call"}]} +{"seq_id": "45433920", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThis class is part of the MacGyver challenge\ninitiated with class variables to use them in the different methods\nall the methods have a quick explanation of their utility and usage\n\"\"\"\n# generic python libraries\nimport pygame as pg\nimport sys\n# own libraries\nimport classes.gameData as gd\nimport config.config as config\nimport classes.imgLoader as imgl\n\nclass GameLogic():\n\n def __init__(self):\n self.collected = 0\n # loading the game from file\n # and initializing game board\n self.board = gd.GameData()\n self.board.get_maps_to_list()\n self.board.find_walls()\n self.board.find_path()\n # randomly distribute objects in the board\n # free path\n self.board.distribute_object()\n\n # some generic values to update the game settings more easily\n # game window title\n pg.display.set_caption(config.SCREEN_TITLE)\n\n # sets the size of the screen of pygamge\n self.screen = pg.display.set_mode((config.SCREEN_W, config.SCREEN_H))\n\n # sets values of the background\n self.screen.fill(config.BACKGR_COLOR)\n\n # sets a font to call it\n pg.font.init()\n objectstxt = pg.font.SysFont('Comic Sans MS', 25, bold=1)\n hypodermictxt = pg.font.SysFont('Comic Sans MS', 25, italic=1)\n self.toptxt = objectstxt.render('Vous avez collecté ces objets ->', False, config.TXT_COLOR)\n self.undertxt = hypodermictxt.render('Vous avez fabriqué la seringue', False, config.TXT_COLOR)\n\n def initiator(self):\n # loads to screen all resources to start\n self.load_resource()\n self.static_to_screen(self.board.wallpositions, self.walls.image)\n self.static_to_screen(self.board.pathpositions, self.path.image)\n self.collectables_to_screen()\n self.macgyver_to_screen()\n self.enemy_to_screen()\n # refresh display\n pg.display.flip()\n\n def load_resource(self):\n # -------------- loads images\n # caracters\n self.macgyver = imgl.ImageLoader(config.MACGYVER_PX)\n self.macgyver_pos = self.board.find_object(config.MACGYVER_POS)\n\n self.enemy = imgl.ImageLoader(config.ENEMY_PX)\n self.enemy_pos = self.board.find_object(config.ENEMY_POS)\n\n # walls\n self.walls = imgl.ImageLoader(config.WALL_PX)\n\n # ground\n self.path = imgl.ImageLoader(config.GROUND_PX)\n\n # Collectables\n self.tube = imgl.ImageLoader(config.TUBE_PX)\n self.needle = imgl.ImageLoader(config.NEEDLE_PX)\n self.aether = imgl.ImageLoader(config.AETHER_PX)\n\n # Final object\n self.hypodermic = imgl.ImageLoader(config.HYPODERMIC_PX)\n\n # -------------- creates lists\n # we add some elements to a list so we can iterate through\n # objects in order to display them\n self.gameObjects = []\n self.gameObjects.append(self.tube)\n self.gameObjects.append(self.needle)\n 
self.gameObjects.append(self.aether)\n\n '''\n use list and distribute it through the windows using\n coordinates from list multiplied by the size of the\n tile or wall structure\n '''\n # displays repetitive elements to the screen such\n # as walls, pathwalks\n def static_to_screen(self, listofobj, image):\n # display list of objects given\n for pos in listofobj:\n x_px = pos[0] * config.TILE_SIZE\n y_px = pos[1] * config.TILE_SIZE\n self.screen.blit(image, (x_px, y_px))\n\n # displays collectables to the screen\n def collectables_to_screen(self):\n # distributes objects into the maze\n\n for pos, coord in enumerate(self.board.objects_pos):\n x_px = coord[0] * config.TILE_SIZE\n y_px = coord[1] * config.TILE_SIZE\n self.screen.blit(self.gameObjects[pos].image, (x_px, y_px))\n\n # displays macgyver to the screen\n def macgyver_to_screen(self):\n self.screen.blit(\n self.macgyver.image, (\n self.macgyver_pos[0] * config.TILE_SIZE,\n self.macgyver_pos[1] * config.TILE_SIZE))\n\n # displays enemy to the screen\n def enemy_to_screen(self):\n self.screen.blit(\n self.enemy.image, (\n self.enemy_pos[0] * config.TILE_SIZE,\n self.enemy_pos[1] * config.TILE_SIZE))\n\n def getPlayerinput(self):\n # gets keys pressed by user and returns them as tuple\n self.up = pg.key.get_pressed()[pg.K_UP]\n self.down = pg.key.get_pressed()[pg.K_DOWN]\n self.left = pg.key.get_pressed()[pg.K_LEFT]\n self.right = pg.key.get_pressed()[pg.K_RIGHT]\n\n # method to move MacGyver in the maze and update screen\n def setPlayerposition(self):\n self.getPlayerinput()\n if self.macgyver_pos in self.board.pathpositions:\n if self.right and self.board.checkwall((self.macgyver_pos[0]+1, self.macgyver_pos[1])):\n self.macgyver_pos = (self.macgyver_pos[0] + 1, self.macgyver_pos[1])\n\n elif self.left and self.board.checkwall((self.macgyver_pos[0]-1, self.macgyver_pos[1])):\n self.macgyver_pos = (self.macgyver_pos[0] - 1, self.macgyver_pos[1])\n\n elif self.up and self.board.checkwall((self.macgyver_pos[0], self.macgyver_pos[1]-1)):\n self.macgyver_pos = (self.macgyver_pos[0], self.macgyver_pos[1]-1)\n\n elif self.down and self.board.checkwall((self.macgyver_pos[0], self.macgyver_pos[1]+1)):\n self.macgyver_pos = (self.macgyver_pos[0], self.macgyver_pos[1]+1)\n\n if self.collected >= 3:\n self.enemy.image = self.hypodermic.image\n \n if self.macgyver_pos in self.board.objects_pos:\n index = self.board.objects_pos.index(self.macgyver_pos)\n self.board.objects_pos[index] = (10+self.collected, 0)\n self.collected += 1\n #self.board.objects_pos.remove(self.macgyver_pos)\n elif self.macgyver_pos == self.enemy_pos:\n if self.collected >= 3:\n print(\"tous les objects ont été ramassées et l'enemi endormi, jeux terminé !\")\n sys.exit()\n else:\n print(\"Vous avez été tué ! 
Too bad.\")\n sys.exit()\n\n # render objects\n self.static_to_screen(self.board.wallpositions, self.walls.image)\n self.static_to_screen(self.board.pathpositions, self.path.image)\n self.macgyver_to_screen()\n self.enemy_to_screen()\n self.collectables_to_screen()\n self.screen.blit(self.toptxt, (0, 0))\n\n if self.collected >= 3:\n self.screen.blit(self.undertxt, (0, 30))\n", "sub_path": "classes/gameLogic.py", "file_name": "gameLogic.py", "file_ext": "py", "file_size_in_byte": 6722, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "classes.gameData.GameData", "line_number": 22, "usage_type": "call"}, {"api_name": "classes.gameData", "line_number": 22, "usage_type": "name"}, {"api_name": "pygame.display.set_caption", "line_number": 32, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 32, "usage_type": "attribute"}, {"api_name": "config.config.SCREEN_TITLE", "line_number": 32, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 32, "usage_type": "name"}, {"api_name": "pygame.display.set_mode", "line_number": 35, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 35, "usage_type": "attribute"}, {"api_name": "config.config.SCREEN_W", "line_number": 35, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 35, "usage_type": "name"}, {"api_name": "config.config.SCREEN_H", "line_number": 35, "usage_type": "attribute"}, {"api_name": "config.config.BACKGR_COLOR", "line_number": 38, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 38, "usage_type": "name"}, {"api_name": "pygame.font.init", "line_number": 41, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 43, "usage_type": "attribute"}, {"api_name": "config.config.TXT_COLOR", "line_number": 44, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 44, "usage_type": "name"}, {"api_name": "config.config.TXT_COLOR", "line_number": 45, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 45, "usage_type": "name"}, {"api_name": "pygame.display.flip", "line_number": 56, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 56, "usage_type": "attribute"}, {"api_name": "classes.imgLoader.ImageLoader", "line_number": 61, "usage_type": "call"}, {"api_name": "classes.imgLoader", "line_number": 61, "usage_type": "name"}, {"api_name": "config.config.MACGYVER_PX", "line_number": 61, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 61, "usage_type": "name"}, {"api_name": "config.config.MACGYVER_POS", "line_number": 62, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 62, "usage_type": "name"}, {"api_name": "classes.imgLoader.ImageLoader", "line_number": 64, "usage_type": "call"}, {"api_name": "classes.imgLoader", "line_number": 64, "usage_type": "name"}, {"api_name": "config.config.ENEMY_PX", "line_number": 64, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 64, "usage_type": "name"}, {"api_name": "config.config.ENEMY_POS", "line_number": 65, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 65, "usage_type": "name"}, 
{"api_name": "classes.imgLoader.ImageLoader", "line_number": 68, "usage_type": "call"}, {"api_name": "classes.imgLoader", "line_number": 68, "usage_type": "name"}, {"api_name": "config.config.WALL_PX", "line_number": 68, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 68, "usage_type": "name"}, {"api_name": "classes.imgLoader.ImageLoader", "line_number": 71, "usage_type": "call"}, {"api_name": "classes.imgLoader", "line_number": 71, "usage_type": "name"}, {"api_name": "config.config.GROUND_PX", "line_number": 71, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 71, "usage_type": "name"}, {"api_name": "classes.imgLoader.ImageLoader", "line_number": 74, "usage_type": "call"}, {"api_name": "classes.imgLoader", "line_number": 74, "usage_type": "name"}, {"api_name": "config.config.TUBE_PX", "line_number": 74, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 74, "usage_type": "name"}, {"api_name": "classes.imgLoader.ImageLoader", "line_number": 75, "usage_type": "call"}, {"api_name": "classes.imgLoader", "line_number": 75, "usage_type": "name"}, {"api_name": "config.config.NEEDLE_PX", "line_number": 75, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 75, "usage_type": "name"}, {"api_name": "classes.imgLoader.ImageLoader", "line_number": 76, "usage_type": "call"}, {"api_name": "classes.imgLoader", "line_number": 76, "usage_type": "name"}, {"api_name": "config.config.AETHER_PX", "line_number": 76, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 76, "usage_type": "name"}, {"api_name": "classes.imgLoader.ImageLoader", "line_number": 79, "usage_type": "call"}, {"api_name": "classes.imgLoader", "line_number": 79, "usage_type": "name"}, {"api_name": "config.config.HYPODERMIC_PX", "line_number": 79, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 79, "usage_type": "name"}, {"api_name": "config.config.TILE_SIZE", "line_number": 99, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 99, "usage_type": "name"}, {"api_name": "config.config.TILE_SIZE", "line_number": 100, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 100, "usage_type": "name"}, {"api_name": "config.config.TILE_SIZE", "line_number": 108, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 108, "usage_type": "name"}, {"api_name": "config.config.TILE_SIZE", "line_number": 109, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 109, "usage_type": "name"}, {"api_name": "config.config.TILE_SIZE", "line_number": 116, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 116, "usage_type": "name"}, {"api_name": "config.config.TILE_SIZE", "line_number": 117, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 117, "usage_type": "name"}, {"api_name": "config.config.TILE_SIZE", "line_number": 123, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 123, "usage_type": "name"}, {"api_name": "config.config.TILE_SIZE", "line_number": 124, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 124, "usage_type": "name"}, {"api_name": "pygame.key.get_pressed", "line_number": 128, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 128, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 128, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 129, "usage_type": "call"}, {"api_name": 
"pygame.key", "line_number": 129, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 129, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 130, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 130, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 130, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 131, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 131, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 131, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 160, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 163, "usage_type": "call"}]} +{"seq_id": "122959745", "text": "from talon.voice import Word, Context, press, Key\nfrom talon import clip\n\nfrom ..utils import (\n insert,\n normalise_keys,\n parse_word,\n surround,\n text,\n sentence_text,\n word,\n parse_words,\n spoken_text,\n)\n\n\ndef title_case_capitalize_word(index, word, _):\n words_to_keep_lowercase = \"a,an,the,at,by,for,in,of,on,to,up,and,as,but,or,nor\".split(\n \",\"\n )\n if index == 0 or word not in words_to_keep_lowercase:\n return word.capitalize()\n else:\n return word\n\n\nformatters = normalise_keys(\n {\n \"thrack\": (True, lambda i, word, _: word[0:3] if i == 0 else \"\"),\n \"quattro\": (True, lambda i, word, _: word[0:4] if i == 0 else \"\"),\n \"(cram | camel)\": (\n True,\n lambda i, word, _: word if i == 0 else word.capitalize(),\n ),\n \"pathway\": (True, lambda i, word, _: word if i == 0 else \"/\" + word),\n \"dotsway\": (True, lambda i, word, _: word if i == 0 else \".\" + word),\n \"yellsmash\": (True, lambda i, word, _: word.upper()),\n \"(allcaps | yeller)\": (False, lambda i, word, _: word.upper()),\n \"yellsnik\": (\n True,\n lambda i, word, _: word.upper() if i == 0 else \"_\" + word.upper(),\n ),\n \"dollcram\": (\n True,\n lambda i, word, _: \"$\" + word if i == 0 else word.capitalize(),\n ),\n # \"champ\": (True, lambda i, word, _: word.capitalize() if i == 0 else \" \" + word),\n \"lowcram\": (\n True,\n lambda i, word, _: \"@\" + word if i == 0 else word.capitalize(),\n ),\n \"(criff | criffed)\": (True, lambda i, word, _: word.capitalize()),\n \"dotcriffed\": (True, lambda i, word, _: \".\" + word.capitalize() if i == 0 else word.capitalize()),\n \"tridal\": (False, lambda i, word, _: word.capitalize()),\n \"snake\": (True, lambda i, word, _: word if i == 0 else \"_\" + word),\n \"dotsnik\": (True, lambda i, word, _: \".\" + word if i == 0 else \"_\" + word),\n \"dot\": (True, lambda i, word, _: \".\" + word if i == 0 else \"_\" + word),\n \"smash\": (True, lambda i, word, _: word),\n \"(spine | kebab)\": (True, lambda i, word, _: word if i == 0 else \"-\" + word),\n \"title\": (False, title_case_capitalize_word),\n }\n)\n\nsurrounders = normalise_keys(\n {\n \"(surround dubstring | surround coif)\": (False, surround('\"')),\n \"(surround string | surround posh)\": (False, surround(\"'\")),\n \"(surround tics | surround glitch)\": (False, surround(\"`\")),\n \"surround prank\": (False, surround(\" \")),\n \"surround dunder\": (False, surround(\"__\")),\n \"surround angler\": (False, surround(\"<\", \">\")),\n \"surround brisk\": (False, surround(\"[\", \"]\")),\n \"surround kirk\": (False, surround(\"{\", \"}\")),\n \"surround precoif\": (False, surround('(\"', '\")')),\n \"surround prex\": (False, surround(\"(\", \")\")),\n }\n)\n\nformatters.update(surrounders)\n\n\ndef FormatText(m):\n fmt 
= []\n\n for w in m._words:\n if isinstance(w, Word) and w != \"over\":\n fmt.append(w.word)\n words = parse_words(m)\n if not words:\n try:\n with clip.capture() as s:\n press(\"cmd-c\")\n words = s.get().split(\" \")\n except clip.NoChange:\n words = [\"\"]\n\n tmp = []\n\n smash = False\n for i, w in enumerate(words):\n word = parse_word(w, True)\n for name in reversed(fmt):\n smash, func = formatters[name]\n word = func(i, word, i == len(words) - 1)\n tmp.append(word)\n\n sep = \"\" if smash else \" \"\n insert(sep.join(tmp))\n # if no words, move cursor inside surrounders\n if not words[0]:\n for i in range(len(tmp[0]) // 2):\n press(\"left\")\n\n\n# from ..noise import pop_control as pc\n\nctx = Context(\"formatters\")\n# ctx = Context(\"formatters\", func=lambda app, window: pc.PopControl.mode != pc.PopControl.DICTATION)\nctx.keymap(\n {\n \"phrase [over]\": spoken_text,\n \"phrase [tree]\": [spoken_text, \" tree\"],\n \"phrase [subtree]\": [spoken_text, \" subtree\"],\n\n \"squash [over]\": text,\n \"derek [] [over]\": [\" \", spoken_text],\n \"darren [] [over]\": [Key(\"cmd-right\"), \" \", spoken_text],\n \"(sentence | champ) [over]\": sentence_text,\n \"(comma | ,) [over]\": [\", \", spoken_text],\n \"period [over]\": [\". \", sentence_text],\n \"word \": word,\n \"(%s)+ [] [over]\" % (\" | \".join(formatters)): FormatText,\n # to match surrounder command + another command (i.e. not dgndictation)\n \"(%s)+\" % (\" | \".join(surrounders)): FormatText,\n }\n)\n\n\n", "sub_path": "text/formatters.py", "file_name": "formatters.py", "file_ext": "py", "file_size_in_byte": 4778, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "utils.word", "line_number": 21, "usage_type": "name"}, {"api_name": "utils.word.capitalize", "line_number": 22, "usage_type": "call"}, {"api_name": "utils.word", "line_number": 22, "usage_type": "name"}, {"api_name": "utils.word", "line_number": 24, "usage_type": "name"}, {"api_name": "utils.normalise_keys", "line_number": 27, "usage_type": "call"}, {"api_name": "utils.word", "line_number": 29, "usage_type": "name"}, {"api_name": "utils.word", "line_number": 30, "usage_type": "name"}, {"api_name": "utils.word", "line_number": 33, "usage_type": "name"}, {"api_name": "utils.word.capitalize", "line_number": 33, "usage_type": "call"}, {"api_name": "utils.word", "line_number": 35, "usage_type": "name"}, {"api_name": "utils.word", "line_number": 36, "usage_type": "name"}, {"api_name": "utils.word.upper", "line_number": 37, "usage_type": "call"}, {"api_name": "utils.word", "line_number": 37, "usage_type": "name"}, {"api_name": "utils.word.upper", "line_number": 38, "usage_type": "call"}, {"api_name": "utils.word", "line_number": 38, "usage_type": "name"}, {"api_name": "utils.word.upper", "line_number": 41, "usage_type": "call"}, {"api_name": "utils.word", "line_number": 41, "usage_type": "name"}, {"api_name": "utils.word", "line_number": 45, "usage_type": "name"}, {"api_name": "utils.word.capitalize", "line_number": 45, "usage_type": "call"}, {"api_name": "utils.word", "line_number": 50, "usage_type": "name"}, {"api_name": "utils.word.capitalize", "line_number": 50, "usage_type": "call"}, {"api_name": "utils.word.capitalize", "line_number": 52, "usage_type": "call"}, {"api_name": "utils.word", "line_number": 52, "usage_type": "name"}, {"api_name": "utils.word.capitalize", "line_number": 53, "usage_type": "call"}, {"api_name": "utils.word", "line_number": 53, "usage_type": "name"}, {"api_name": 
"utils.word.capitalize", "line_number": 54, "usage_type": "call"}, {"api_name": "utils.word", "line_number": 54, "usage_type": "name"}, {"api_name": "utils.word", "line_number": 55, "usage_type": "name"}, {"api_name": "utils.word", "line_number": 56, "usage_type": "name"}, {"api_name": "utils.word", "line_number": 57, "usage_type": "name"}, {"api_name": "utils.word", "line_number": 58, "usage_type": "name"}, {"api_name": "utils.word", "line_number": 59, "usage_type": "name"}, {"api_name": "utils.normalise_keys", "line_number": 64, "usage_type": "call"}, {"api_name": "utils.surround", "line_number": 66, "usage_type": "call"}, {"api_name": "utils.surround", "line_number": 67, "usage_type": "call"}, {"api_name": "utils.surround", "line_number": 68, "usage_type": "call"}, {"api_name": "utils.surround", "line_number": 69, "usage_type": "call"}, {"api_name": "utils.surround", "line_number": 70, "usage_type": "call"}, {"api_name": "utils.surround", "line_number": 71, "usage_type": "call"}, {"api_name": "utils.surround", "line_number": 72, "usage_type": "call"}, {"api_name": "utils.surround", "line_number": 73, "usage_type": "call"}, {"api_name": "utils.surround", "line_number": 74, "usage_type": "call"}, {"api_name": "utils.surround", "line_number": 75, "usage_type": "call"}, {"api_name": "talon.voice.Word", "line_number": 86, "usage_type": "argument"}, {"api_name": "utils.parse_words", "line_number": 88, "usage_type": "call"}, {"api_name": "talon.clip.capture", "line_number": 91, "usage_type": "call"}, {"api_name": "talon.clip", "line_number": 91, "usage_type": "name"}, {"api_name": "talon.voice.press", "line_number": 92, "usage_type": "call"}, {"api_name": "talon.clip.NoChange", "line_number": 94, "usage_type": "attribute"}, {"api_name": "talon.clip", "line_number": 94, "usage_type": "name"}, {"api_name": "utils.word", "line_number": 101, "usage_type": "name"}, {"api_name": "utils.parse_word", "line_number": 101, "usage_type": "call"}, {"api_name": "utils.word", "line_number": 104, "usage_type": "name"}, {"api_name": "utils.word", "line_number": 105, "usage_type": "argument"}, {"api_name": "utils.insert", "line_number": 108, "usage_type": "call"}, {"api_name": "talon.voice.press", "line_number": 112, "usage_type": "call"}, {"api_name": "talon.voice.Context", "line_number": 117, "usage_type": "call"}, {"api_name": "utils.spoken_text", "line_number": 121, "usage_type": "name"}, {"api_name": "utils.spoken_text", "line_number": 122, "usage_type": "name"}, {"api_name": "utils.spoken_text", "line_number": 123, "usage_type": "name"}, {"api_name": "utils.text", "line_number": 125, "usage_type": "name"}, {"api_name": "utils.spoken_text", "line_number": 126, "usage_type": "name"}, {"api_name": "talon.voice.Key", "line_number": 127, "usage_type": "call"}, {"api_name": "utils.spoken_text", "line_number": 127, "usage_type": "name"}, {"api_name": "utils.sentence_text", "line_number": 128, "usage_type": "name"}, {"api_name": "utils.spoken_text", "line_number": 129, "usage_type": "name"}, {"api_name": "utils.sentence_text", "line_number": 130, "usage_type": "name"}, {"api_name": "utils.word", "line_number": 131, "usage_type": "name"}]} +{"seq_id": "454259555", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.db.models.deletion\nimport mptt.fields\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.BLANC_PAGES_MODEL),\n ('pages', 
'0004_rename_tables'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Contact',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=120, blank=True)),\n ('email', models.EmailField(max_length=70)),\n ('subject', models.CharField(max_length=120)),\n ('content', models.TextField()),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ],\n ),\n migrations.CreateModel(\n name='ContactFormBlock',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('recipient', models.EmailField(max_length=254)),\n ('content_block', models.ForeignKey(editable=False, to='pages.ContentBlock', null=True)),\n ('success_page', mptt.fields.TreeForeignKey(on_delete=django.db.models.deletion.SET_NULL, to=settings.BLANC_PAGES_MODEL, null=True)),\n ],\n options={\n 'abstract': False,\n },\n ),\n ]\n", "sub_path": "apps/contacts/migrations/0001_initial.py", "file_name": "0001_initial.py", "file_ext": "py", "file_size_in_byte": 1561, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "87", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 10, "usage_type": "name"}, {"api_name": "django.db.migrations.swappable_dependency", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 13, "usage_type": "name"}, {"api_name": "django.conf.settings.BLANC_PAGES_MODEL", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.EmailField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 32, "usage_type": "name"}, {"api_name": "django.db.models.EmailField", "line_number": 33, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 34, "usage_type": "name"}, {"api_name": 
"mptt.fields.fields.TreeForeignKey", "line_number": 35, "usage_type": "call"}, {"api_name": "mptt.fields.fields", "line_number": 35, "usage_type": "attribute"}, {"api_name": "mptt.fields", "line_number": 35, "usage_type": "name"}, {"api_name": "django.db.db", "line_number": 35, "usage_type": "attribute"}, {"api_name": "django.db", "line_number": 35, "usage_type": "name"}, {"api_name": "django.conf.settings.BLANC_PAGES_MODEL", "line_number": 35, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "438658863", "text": "import streamlit as st\r\nimport tempfile\r\nimport warnings\r\nimport os\r\nfrom PIL import Image\r\nfrom video import *\r\n\r\nwarnings.filterwarnings(\"ignore\", message=r\"Passing\", category=FutureWarning)\r\n\r\n\r\n# hide hamburger menu\r\n# hide_streamlit_style = \"\"\"\r\n# \r\n# \"\"\"\r\n# st.markdown(hide_streamlit_style, unsafe_allow_html=True)\r\n\r\n\r\n# hide footer\r\nhide_footer_style = \"\"\"\r\n